// SPDX-License-Identifier: GPL-2.0-only
/*
 * IMG Multi-threaded DMA Controller (MDC)
 *
 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MDC_MAX_DMA_CHANNELS			32

#define MDC_GENERAL_CONFIG			0x000
#define MDC_GENERAL_CONFIG_LIST_IEN		BIT(31)
#define MDC_GENERAL_CONFIG_IEN			BIT(29)
#define MDC_GENERAL_CONFIG_LEVEL_INT		BIT(28)
#define MDC_GENERAL_CONFIG_INC_W		BIT(12)
#define MDC_GENERAL_CONFIG_INC_R		BIT(8)
#define MDC_GENERAL_CONFIG_PHYSICAL_W		BIT(7)
#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT	4
#define MDC_GENERAL_CONFIG_WIDTH_W_MASK		0x7
#define MDC_GENERAL_CONFIG_PHYSICAL_R		BIT(3)
#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT	0
#define MDC_GENERAL_CONFIG_WIDTH_R_MASK		0x7

#define MDC_READ_PORT_CONFIG			0x004
#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT	28
#define MDC_READ_PORT_CONFIG_STHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT	24
#define MDC_READ_PORT_CONFIG_RTHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT	16
#define MDC_READ_PORT_CONFIG_WTHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT	4
#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK	0xff
#define MDC_READ_PORT_CONFIG_DREQ_ENABLE	BIT(1)

#define MDC_READ_ADDRESS			0x008

#define MDC_WRITE_ADDRESS			0x00c

#define MDC_TRANSFER_SIZE			0x010
#define MDC_TRANSFER_SIZE_MASK			0xffffff

#define MDC_LIST_NODE_ADDRESS			0x014

#define MDC_CMDS_PROCESSED			0x018
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT	16
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK	0x3f
#define MDC_CMDS_PROCESSED_INT_ACTIVE		BIT(8)
#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT	0
#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK	0x3f

#define MDC_CONTROL_AND_STATUS			0x01c
#define MDC_CONTROL_AND_STATUS_CANCEL		BIT(20)
#define MDC_CONTROL_AND_STATUS_LIST_EN		BIT(4)
#define MDC_CONTROL_AND_STATUS_EN		BIT(0)

#define MDC_ACTIVE_TRANSFER_SIZE		0x030

#define MDC_GLOBAL_CONFIG_A				0x900
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT	16
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK	0xff
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT		8
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK		0xff
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT		0
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK		0xff

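/*
 * One node of the hardware's linked command list. The first eight words
 * mirror the per-channel registers at offsets 0x000-0x01c and are loaded by
 * the hardware when it fetches the node.
 */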
struct mdc_hw_list_desc {
	u32 gen_conf;
	u32 readport_conf;
	u32 read_addr;
	u32 write_addr;
	u32 xfer_size;
	u32 node_addr;
	u32 cmds_done;
	u32 ctrl_status;
	/*
	 * Not part of the list descriptor, but instead used by the CPU to
	 * traverse the list.
	 */
	struct mdc_hw_list_desc *next_desc;
};

struct mdc_tx_desc {
	struct mdc_chan *chan;
	struct virt_dma_desc vd;
	dma_addr_t list_phys;
	struct mdc_hw_list_desc *list;
	bool cyclic;
	bool cmd_loaded;
	unsigned int list_len;
	unsigned int list_period_len;
	size_t list_xfer_size;
	unsigned int list_cmds_done;
};

struct mdc_chan {
	struct mdc_dma *mdma;
	struct virt_dma_chan vc;
	struct dma_slave_config config;
	struct mdc_tx_desc *desc;
	int irq;
	unsigned int periph;
	unsigned int thread;
	unsigned int chan_nr;
};

struct mdc_dma_soc_data {
	void (*enable_chan)(struct mdc_chan *mchan);
	void (*disable_chan)(struct mdc_chan *mchan);
};

struct mdc_dma {
	struct dma_device dma_dev;
	void __iomem *regs;
	struct clk *clk;
	struct dma_pool *desc_pool;
	struct regmap *periph_regs;
	spinlock_t lock;
	unsigned int nr_threads;
	unsigned int nr_channels;
	unsigned int bus_width;
	unsigned int max_burst_mult;
	unsigned int max_xfer_size;
	const struct mdc_dma_soc_data *soc;
	struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
};

static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
{
	return readl(mdma->regs + reg);
}

static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
{
	writel(val, mdma->regs + reg);
}

static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
{
	return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
}

static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
{
	mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
}

static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
{
	return container_of(to_virt_chan(c), struct mdc_chan, vc);
}

static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
{
	struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);

	return container_of(vdesc, struct mdc_tx_desc, vd);
}

static inline struct device *mdma2dev(struct mdc_dma *mdma)
{
	return mdma->dma_dev.dev;
}

static inline unsigned int to_mdc_width(unsigned int bytes)
{
	return ffs(bytes) - 1;
}

static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
				      unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
}

static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
				       unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
}

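/*
 * Fill in one hardware list node for a transfer of @len bytes from @src to
 * @dst. Widths and burst size come from the slave config for peripheral
 * transfers and from the system bus width for memory-to-memory; the maximum
 * burst is reduced by one bus width when either address is not bus-aligned.
 */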
static void mdc_list_desc_config(struct mdc_chan *mchan,
				 struct mdc_hw_list_desc *ldesc,
				 enum dma_transfer_direction dir,
				 dma_addr_t src, dma_addr_t dst, size_t len)
{
	struct mdc_dma *mdma = mchan->mdma;
	unsigned int max_burst, burst_size;

	ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	ldesc->readport_conf =
		(mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	ldesc->read_addr = src;
	ldesc->write_addr = dst;
	ldesc->xfer_size = len - 1;
	ldesc->node_addr = 0;
	ldesc->cmds_done = 0;
	ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
		MDC_CONTROL_AND_STATUS_EN;
	ldesc->next_desc = NULL;

	if (IS_ALIGNED(dst, mdma->bus_width) &&
	    IS_ALIGNED(src, mdma->bus_width))
		max_burst = mdma->bus_width * mdma->max_burst_mult;
	else
		max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);

	if (dir == DMA_MEM_TO_DEV) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
		burst_size = min(max_burst, mchan->config.dst_maxburst *
				 mchan->config.dst_addr_width);
	} else if (dir == DMA_DEV_TO_MEM) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mchan->config.src_addr_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = min(max_burst, mchan->config.src_maxburst *
				 mchan->config.src_addr_width);
	} else {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
			MDC_GENERAL_CONFIG_INC_W;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = max_burst;
	}
	ldesc->readport_conf |= (burst_size - 1) <<
		MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
}

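/* Walk the hardware list and return each node to the descriptor pool. */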
static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
{
	struct mdc_dma *mdma = mdesc->chan->mdma;
	struct mdc_hw_list_desc *curr, *next;
	dma_addr_t curr_phys, next_phys;

	curr = mdesc->list;
	curr_phys = mdesc->list_phys;
	while (curr) {
		next = curr->next_desc;
		next_phys = curr->node_addr;
		dma_pool_free(mdma->desc_pool, curr, curr_phys);
		curr = next;
		curr_phys = next_phys;
	}
}

static void mdc_desc_free(struct virt_dma_desc *vd)
{
	struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);

	mdc_list_desc_free(mdesc);
	kfree(mdesc);
}

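/*
 * A memcpy may exceed the hardware's 24-bit per-command transfer size, so
 * it is split into a chain of list nodes of at most max_xfer_size bytes.
 */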
static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;

	if (!len)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->list_xfer_size = len;

	while (len > 0) {
		size_t xfer_size;

		curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
		if (!curr)
			goto free_desc;

		if (prev) {
			prev->node_addr = curr_phys;
			prev->next_desc = curr;
		} else {
			mdesc->list_phys = curr_phys;
			mdesc->list = curr;
		}

		xfer_size = min_t(size_t, mdma->max_xfer_size, len);

		mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
				     xfer_size);

		prev = curr;

		mdesc->list_len++;
		src += xfer_size;
		dest += xfer_size;
		len -= xfer_size;
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

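/*
 * Check that the slave bus width for @dir is a supported power-of-two width
 * no wider than the system bus.
 */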
static int mdc_check_slave_width(struct mdc_chan *mchan,
				 enum dma_transfer_direction dir)
{
	enum dma_slave_buswidth width;

	if (dir == DMA_MEM_TO_DEV)
		width = mchan->config.dst_addr_width;
	else
		width = mchan->config.src_addr_width;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		break;
	default:
		return -EINVAL;
	}

	if (width > mchan->mdma->bus_width)
		return -EINVAL;

	return 0;
}

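/*
 * Build a circular list for a cyclic transfer: each period is split into
 * nodes of at most max_xfer_size bytes, and the last node links back to the
 * head of the list.
 */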
static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;

	if (!buf_len && !period_len)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->cyclic = true;
	mdesc->list_xfer_size = buf_len;
	mdesc->list_period_len = DIV_ROUND_UP(period_len,
					      mdma->max_xfer_size);

	while (buf_len > 0) {
		size_t remainder = min(period_len, buf_len);

		while (remainder > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  remainder);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir,
						     buf_addr,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf_addr,
						     xfer_size);
			}

			prev = curr;

			mdesc->list_len++;
			buf_addr += xfer_size;
			buf_len -= xfer_size;
			remainder -= xfer_size;
		}
	}
	prev->node_addr = mdesc->list_phys;

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

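/*
 * Build a list covering the scatterlist, splitting each entry into nodes of
 * at most max_xfer_size bytes.
 */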
static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long flags, void *context)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct scatterlist *sg;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;
	unsigned int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t buf = sg_dma_address(sg);
		size_t buf_len = sg_dma_len(sg);

		while (buf_len > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  buf_len);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir, buf,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf, xfer_size);
			}

			prev = curr;

			mdesc->list_len++;
			mdesc->list_xfer_size += xfer_size;
			buf += xfer_size;
			buf_len -= xfer_size;
		}
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

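/*
 * Start the next queued descriptor: route the peripheral to this channel,
 * program the general and read-port configuration, point the hardware at
 * the first list node and set LIST_EN. Called with the channel lock held.
 */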
static void mdc_issue_desc(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;
	struct virt_dma_desc *vd;
	struct mdc_tx_desc *mdesc;
	u32 val;

	vd = vchan_next_desc(&mchan->vc);
	if (!vd)
		return;

	list_del(&vd->node);

	mdesc = to_mdc_desc(&vd->tx);
	mchan->desc = mdesc;

	dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
		mchan->chan_nr);

	mdma->soc->enable_chan(mchan);

	val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
	val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
	val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
	mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
	val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
	val |= MDC_CONTROL_AND_STATUS_LIST_EN;
	mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
}

static void mdc_issue_pending(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
		mdc_issue_desc(mchan);
	spin_unlock_irqrestore(&mchan->vc.lock, flags);
}

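/*
 * Report transfer status. For the in-flight descriptor the residue is
 * derived from the hardware's done/processed counters and the remaining
 * byte count of the current node; CMDS_PROCESSED is read on both sides of
 * MDC_ACTIVE_TRANSFER_SIZE so a command completing mid-read is detected.
 */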
static enum dma_status mdc_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_tx_desc *mdesc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	size_t bytes = 0;
	int ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	vd = vchan_find_desc(&mchan->vc, cookie);
	if (vd) {
		mdesc = to_mdc_desc(&vd->tx);
		bytes = mdesc->list_xfer_size;
	} else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
		struct mdc_hw_list_desc *ldesc;
		u32 val1, val2, done, processed, residue;
		int i, cmds;

		mdesc = mchan->desc;

		/*
		 * Determine the number of commands that haven't been
		 * processed (handled by the IRQ handler) yet.
		 */
		do {
			val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
			residue = mdc_chan_readl(mchan,
						 MDC_ACTIVE_TRANSFER_SIZE);
			val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
		} while (val1 != val2);

		done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
		processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
		cmds = (done - processed) %
			(MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);

		/*
		 * If the command loaded event hasn't been processed yet, then
		 * the difference above includes an extra command.
		 */
		if (!mdesc->cmd_loaded)
			cmds--;
		else
			cmds += mdesc->list_cmds_done;

		bytes = mdesc->list_xfer_size;
		ldesc = mdesc->list;
		for (i = 0; i < cmds; i++) {
			bytes -= ldesc->xfer_size + 1;
			ldesc = ldesc->next_desc;
		}
		if (ldesc) {
			if (residue != MDC_TRANSFER_SIZE_MASK)
				bytes -= ldesc->xfer_size - residue;
			else
				bytes -= ldesc->xfer_size + 1;
		}
	}
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	dma_set_residue(txstate, bytes);

	return ret;
}

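/*
 * Acknowledge completed commands by copying CMDS_DONE into CMDS_PROCESSED
 * (clearing INT_ACTIVE along the way) and return the number of commands
 * completed since the last call, allowing for counter wrap-around.
 */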
static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
{
	u32 val, processed, done1, done2;
	unsigned int ret;

	val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
	processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
				MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
	/*
	 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
	 * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
	 * didn't miss a command completion.
	 */
	do {
		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);

		done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;

		val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
			  MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
			 MDC_CMDS_PROCESSED_INT_ACTIVE);

		val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;

		mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);

		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);

		done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
	} while (done1 != done2);

	if (done1 >= processed)
		ret = done1 - processed;
	else
		ret = ((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1) -
			processed) + done1;

	return ret;
}

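/*
 * Cancel any in-flight transfer and free all queued descriptors.
 * Outstanding completion events are acknowledged and discarded.
 */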
static int mdc_terminate_all(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&mchan->vc.lock, flags);

	mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
			MDC_CONTROL_AND_STATUS);

	if (mchan->desc) {
		vchan_terminate_vdesc(&mchan->desc->vd);
		mchan->desc = NULL;
	}
	vchan_get_all_descriptors(&mchan->vc, &head);

	mdc_get_new_events(mchan);

	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	vchan_dma_desc_free_list(&mchan->vc, &head);

	return 0;
}

static void mdc_synchronize(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);

	vchan_synchronize(&mchan->vc);
}

static int mdc_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	mchan->config = *config;
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	return 0;
}

static int mdc_alloc_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct device *dev = mdma2dev(mchan->mdma);

	return pm_runtime_get_sync(dev);
}

static void mdc_free_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct device *dev = mdma2dev(mdma);

	mdc_terminate_all(chan);
	mdma->soc->disable_chan(mchan);
	pm_runtime_put(dev);
}

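/*
 * Per-channel interrupt handler. The first event of a transfer only means
 * the command list was loaded; each later event marks a completed node.
 * Cyclic transfers fire the period callback, while ordinary transfers
 * complete once every node has been processed.
 */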
static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
	struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
	struct mdc_tx_desc *mdesc;
	unsigned int i, new_events;

	spin_lock(&mchan->vc.lock);

	dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);

	new_events = mdc_get_new_events(mchan);

	if (!new_events)
		goto out;

	mdesc = mchan->desc;
	if (!mdesc) {
		dev_warn(mdma2dev(mchan->mdma),
			 "IRQ with no active descriptor on channel %d\n",
			 mchan->chan_nr);
		goto out;
	}

	for (i = 0; i < new_events; i++) {
		/*
		 * The first interrupt in a transfer indicates that the
		 * command list has been loaded, not that a command has
		 * been completed.
		 */
		if (!mdesc->cmd_loaded) {
			mdesc->cmd_loaded = true;
			continue;
		}

		mdesc->list_cmds_done++;
		if (mdesc->cyclic) {
			mdesc->list_cmds_done %= mdesc->list_len;
			if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
				vchan_cyclic_callback(&mdesc->vd);
		} else if (mdesc->list_cmds_done == mdesc->list_len) {
			mchan->desc = NULL;
			vchan_cookie_complete(&mdesc->vd);
			mdc_issue_desc(mchan);
			break;
		}
	}
out:
	spin_unlock(&mchan->vc.lock);

	return IRQ_HANDLED;
}

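/*
 * #dma-cells is 3: the peripheral number, a bitmask of the channels the
 * request may be routed to, and the thread ID. Claim the first free channel
 * permitted by the mask.
 */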
static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	struct mdc_dma *mdma = ofdma->of_dma_data;
	struct dma_chan *chan;

	if (dma_spec->args_count != 3)
		return NULL;

	list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
		struct mdc_chan *mchan = to_mdc_chan(chan);

		if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
			continue;
		if (dma_get_slave_channel(chan)) {
			mchan->periph = dma_spec->args[0];
			mchan->thread = dma_spec->args[2];
			return chan;
		}
	}

	return NULL;
}

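/*
 * On Pistachio, DMA request routing is set through the peripheral control
 * registers: one 6-bit field per channel, four channels packed into each
 * 32-bit register starting at offset 0x120.
 */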
#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch)	(0x120 + 0x4 * ((ch) / 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch)	(8 * ((ch) % 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK	0x3f

static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   mchan->periph <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
}

static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   0);
}

static const struct mdc_dma_soc_data pistachio_mdc_data = {
	.enable_chan = pistachio_mdc_enable_chan,
	.disable_chan = pistachio_mdc_disable_chan,
};

static const struct of_device_id mdc_dma_of_match[] = {
	{ .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
	{ },
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);

static int img_mdc_runtime_suspend(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);

	clk_disable_unprepare(mdma->clk);

	return 0;
}

static int img_mdc_runtime_resume(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);

	return clk_prepare_enable(mdma->clk);
}

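/*
 * Channel count, thread count and bus width default to the values reported
 * in MDC_GLOBAL_CONFIG_A; a "dma-channels" property may override the
 * channel count.
 */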
static int mdc_dma_probe(struct platform_device *pdev)
{
	struct mdc_dma *mdma;
	unsigned int i;
	u32 val;
	int ret;

	mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
	if (!mdma)
		return -ENOMEM;
	platform_set_drvdata(pdev, mdma);

	mdma->soc = of_device_get_match_data(&pdev->dev);

	mdma->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdma->regs))
		return PTR_ERR(mdma->regs);

	mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "img,cr-periph");
	if (IS_ERR(mdma->periph_regs))
		return PTR_ERR(mdma->periph_regs);

	mdma->clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(mdma->clk))
		return PTR_ERR(mdma->clk);

	dma_cap_zero(mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);

	val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
	mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
		MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
	mdma->nr_threads =
		1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
		      MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
	mdma->bus_width =
		(1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
		       MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
	/*
	 * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
	 * are supported, this makes it possible for the value reported in
	 * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
	 * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
	 * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining. To eliminate this
	 * ambiguity, restrict transfer sizes to one bus-width less than the
	 * actual maximum.
	 */
	mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;

	of_property_read_u32(pdev->dev.of_node, "dma-channels",
			     &mdma->nr_channels);
	ret = of_property_read_u32(pdev->dev.of_node,
				   "img,max-burst-multiplier",
				   &mdma->max_burst_mult);
	if (ret)
		return ret;

	mdma->dma_dev.dev = &pdev->dev;
	mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
	mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
	mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
	mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
	mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
	mdma->dma_dev.device_tx_status = mdc_tx_status;
	mdma->dma_dev.device_issue_pending = mdc_issue_pending;
	mdma->dma_dev.device_terminate_all = mdc_terminate_all;
	mdma->dma_dev.device_synchronize = mdc_synchronize;
	mdma->dma_dev.device_config = mdc_slave_config;

	mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	for (i = 1; i <= mdma->bus_width; i <<= 1) {
		mdma->dma_dev.src_addr_widths |= BIT(i);
		mdma->dma_dev.dst_addr_widths |= BIT(i);
	}

	INIT_LIST_HEAD(&mdma->dma_dev.channels);
	for (i = 0; i < mdma->nr_channels; i++) {
		struct mdc_chan *mchan = &mdma->channels[i];

		mchan->mdma = mdma;
		mchan->chan_nr = i;
		mchan->irq = platform_get_irq(pdev, i);
		if (mchan->irq < 0)
			return mchan->irq;

		ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
				       IRQ_TYPE_LEVEL_HIGH,
				       dev_name(&pdev->dev), mchan);
		if (ret < 0)
			return ret;

		mchan->vc.desc_free = mdc_desc_free;
		vchan_init(&mchan->vc, &mdma->dma_dev);
	}

	mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
					   sizeof(struct mdc_hw_list_desc),
					   4, 0);
	if (!mdma->desc_pool)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = img_mdc_runtime_resume(&pdev->dev);
		if (ret)
			return ret;
	}

	ret = dma_async_device_register(&mdma->dma_dev);
	if (ret)
		goto suspend;

	ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
	if (ret)
		goto unregister;

	dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
		 mdma->nr_channels, mdma->nr_threads);

	return 0;

unregister:
	dma_async_device_unregister(&mdma->dma_dev);
suspend:
	if (!pm_runtime_enabled(&pdev->dev))
		img_mdc_runtime_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static void mdc_dma_remove(struct platform_device *pdev)
{
	struct mdc_dma *mdma = platform_get_drvdata(pdev);
	struct mdc_chan *mchan, *next;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdma->dma_dev);

	list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&mchan->vc.chan.device_node);

		devm_free_irq(&pdev->dev, mchan->irq, mchan);

		tasklet_kill(&mchan->vc.task);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		img_mdc_runtime_suspend(&pdev->dev);
}

#ifdef CONFIG_PM_SLEEP
static int img_mdc_suspend_late(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);
	int i;

	/* Check that all channels are idle */
	for (i = 0; i < mdma->nr_channels; i++) {
		struct mdc_chan *mchan = &mdma->channels[i];

		if (unlikely(mchan->desc))
			return -EBUSY;
	}

	return pm_runtime_force_suspend(dev);
}

static int img_mdc_resume_early(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_mdc_pm_ops = {
	SET_RUNTIME_PM_OPS(img_mdc_runtime_suspend,
			   img_mdc_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(img_mdc_suspend_late,
				     img_mdc_resume_early)
};

static struct platform_driver mdc_dma_driver = {
	.driver = {
		.name = "img-mdc-dma",
		.pm = &img_mdc_pm_ops,
		.of_match_table = of_match_ptr(mdc_dma_of_match),
	},
	.probe = mdc_dma_probe,
	.remove_new = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);

MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");