// SPDX-License-Identifier: GPL-2.0+
/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16 MiB.
 */

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rculist.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "shdma.h"

/* DMA registers */
#define SAR	0x00	/* Source Address Register */
#define DAR	0x04	/* Destination Address Register */
#define TCR	0x08	/* Transfer Count Register */
#define CHCR	0x0C	/* Channel Control Register */
#define DMAOR	0x40	/* DMA Operation Register */

#define TEND	0x18	/* USB-DMAC */

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER	256
#define SH_DMA_TCR_MAX		(16 * 1024 * 1024 - 1)
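
/*
 * Note: TCR is programmed in transfer-size units, not bytes; dmae_set_reg()
 * shifts the byte count right by the channel's xmit_shift before writing it.
 * The 16 MiB - 1 limit above corresponds to a 24-bit count.
 */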

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}
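
/* channel_clear() is only called from sh_dmae_rst(), under sh_dmae_lock. */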

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}
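
/*
 * Example (hypothetical pdata): with ts_shift = { 3, 4, 1, 2 } and a CHCR
 * whose combined TS field decodes to 1, calc_xmit_shift() returns 4, i.e.
 * a 2^4 = 16-byte transfer unit. An out-of-range TS value falls back to
 * entry 0 instead of indexing past the table.
 */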

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
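
/*
 * log2size_to_chcr() is the inverse of calc_xmit_shift(): it encodes a log2
 * transfer size back into the split TS bit-field of CHCR, again falling
 * back to table entry 0 for unsupported sizes.
 */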

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 */
	u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
						LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* In case of a missing DMARS resource, use the first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
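
/*
 * The DMARS update above is a 16-bit read-modify-write: each DMARS register
 * carries the MID/RID of two channels, one per byte, so the mask
 * (0xff00 >> shift) preserves the neighbouring channel's byte while
 * val << shift (shift is typically 0 or 8) installs the new MID/RID.
 */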

static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}

/*
 * Find a slave channel configuration from the controller list by either a
 * slave ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = i;
				return cfg;
			}
	}

	return NULL;
}
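
/*
 * When try is true the slave request is only validated; the configuration
 * is committed on a later call with try == false.
 */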
static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, dma_addr_t slave_addr, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try) {
		sh_chan->config = cfg;
		sh_chan->slave_addr = slave_addr ? : cfg->addr;
	}

	return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}
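
/*
 * Residue of a partially completed descriptor: TCR counts in transfer-size
 * units, so the bytes still outstanding are (TCR << xmit_shift), and the
 * bytes already transferred are the descriptor's length minus that.
 */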
static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so we have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}
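
/*
 * A descriptor is judged complete once the channel's current address
 * register has reached the end of its buffer: DAR is compared for
 * DMA_DEV_TO_MEM (memory is the destination), SAR otherwise.
 */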
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
			      int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan)
		return -ENOMEM;

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
}

#ifdef CONFIG_PM
static int sh_dmae_runtime_suspend(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	sh_dmae_ctl_stop(shdev);
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}
#endif

#ifdef CONFIG_PM_SLEEP
static int sh_dmae_suspend(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	sh_dmae_ctl_stop(shdev);
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops sh_dmae_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
	SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
			   NULL)
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->slave_addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};

static int sh_dmae_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES  | DMA_SLAVE_BUSWIDTH_8_BYTES |
		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
	const struct sh_dmae_pdata *pdata;
	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int chan_irq[SH_DMAE_MAX_CHANNELS];
	unsigned long irqflags = 0;
	int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *dmars, *errirq_res, *chanirq_res;

	if (pdev->dev.of_node)
		pdata = of_device_get_match_data(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
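	/*
	 * For example (hypothetical board code): a controller whose channels
	 * all share one muxed IRQ would declare a single IRQ resource with
	 * start == end, while one that routes every channel separately would
	 * declare the error IRQ first, followed by one resource (or one
	 * range) per channel.
	 */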
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev)
		return -ENOMEM;

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}

	dma_dev->src_addr_widths = widths;
	dma_dev->dst_addr_widths = widths;
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 2^2 = 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			 pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	if (IS_ENABLED(CONFIG_CPU_SH4) || IS_ENABLED(CONFIG_ARCH_RENESAS)) {
		chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

		if (!chanirq_res)
			chanirq_res = errirq_res;
		else
			irqres++;

		if (chanirq_res == errirq_res ||
		    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
			irqflags = IRQF_SHARED;

		errirq = errirq_res->start;

		err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err,
				       irqflags, "DMAC Address Error", shdev);
		if (err) {
			dev_err(&pdev->dev,
				"DMA failed requesting irq #%d, error %d\n",
				errirq, err);
			goto eirq_err;
		}
	} else {
		chanirq_res = errirq_res;
	}

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = 0;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

eirq_err:
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}

static void sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	synchronize_rcu();
}

static struct platform_driver sh_dmae_driver = {
	.driver		= {
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
	},
	.remove_new	= sh_dmae_remove,
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);