1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * OMAP DMAengine support |
4 | */ |
5 | #include <linux/cpu_pm.h> |
6 | #include <linux/delay.h> |
7 | #include <linux/dmaengine.h> |
8 | #include <linux/dma-mapping.h> |
9 | #include <linux/dmapool.h> |
10 | #include <linux/err.h> |
11 | #include <linux/init.h> |
12 | #include <linux/interrupt.h> |
13 | #include <linux/list.h> |
14 | #include <linux/module.h> |
15 | #include <linux/omap-dma.h> |
16 | #include <linux/platform_device.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/spinlock.h> |
19 | #include <linux/of.h> |
20 | #include <linux/of_dma.h> |
21 | |
22 | #include "../virt-dma.h" |
23 | |
24 | #define OMAP_SDMA_REQUESTS 127 |
25 | #define OMAP_SDMA_CHANNELS 32 |
26 | |
27 | struct omap_dma_config { |
28 | int lch_end; |
29 | unsigned int rw_priority:1; |
30 | unsigned int needs_busy_check:1; |
31 | unsigned int may_lose_context:1; |
32 | unsigned int needs_lch_clear:1; |
33 | }; |
34 | |
35 | struct omap_dma_context { |
36 | u32 irqenable_l0; |
37 | u32 irqenable_l1; |
38 | u32 ocp_sysconfig; |
39 | u32 gcr; |
40 | }; |
41 | |
42 | struct omap_dmadev { |
43 | struct dma_device ddev; |
44 | spinlock_t lock; |
45 | void __iomem *base; |
46 | const struct omap_dma_reg *reg_map; |
47 | struct omap_system_dma_plat_info *plat; |
48 | const struct omap_dma_config *cfg; |
49 | struct notifier_block nb; |
50 | struct omap_dma_context context; |
51 | int lch_count; |
52 | DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS); |
53 | struct mutex lch_lock; /* for assigning logical channels */ |
54 | bool legacy; |
55 | bool ll123_supported; |
56 | struct dma_pool *desc_pool; |
57 | unsigned dma_requests; |
58 | spinlock_t irq_lock; |
59 | uint32_t irq_enable_mask; |
60 | struct omap_chan **lch_map; |
61 | }; |
62 | |
63 | struct omap_chan { |
64 | struct virt_dma_chan vc; |
65 | void __iomem *channel_base; |
66 | const struct omap_dma_reg *reg_map; |
67 | uint32_t ccr; |
68 | |
69 | struct dma_slave_config cfg; |
70 | unsigned dma_sig; |
71 | bool cyclic; |
72 | bool paused; |
73 | bool running; |
74 | |
75 | int dma_ch; |
76 | struct omap_desc *desc; |
77 | unsigned sgidx; |
78 | }; |
79 | |
80 | #define DESC_NXT_SV_REFRESH (0x1 << 24) |
81 | #define DESC_NXT_SV_REUSE (0x2 << 24) |
82 | #define DESC_NXT_DV_REFRESH (0x1 << 26) |
83 | #define DESC_NXT_DV_REUSE (0x2 << 26) |
84 | #define DESC_NTYPE_TYPE2 (0x2 << 29) |
85 | |
86 | /* Type 2 descriptor with Source or Destination address update */ |
87 | struct omap_type2_desc { |
88 | uint32_t next_desc; |
89 | uint32_t en; |
90 | uint32_t addr; /* src or dst */ |
91 | uint16_t fn; |
92 | uint16_t cicr; |
93 | int16_t cdei; |
94 | int16_t csei; |
95 | int32_t cdfi; |
96 | int32_t csfi; |
97 | } __packed; |
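
/*
 * Type 2 descriptors are chained through next_desc, which holds the
 * physical address of the next descriptor; the value 0xfffffffc
 * terminates the chain (see omap_dma_fill_type2_desc()).
 */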
98 | |
99 | struct omap_sg { |
100 | dma_addr_t addr; |
101 | uint32_t en; /* number of elements (24-bit) */ |
102 | uint32_t fn; /* number of frames (16-bit) */ |
103 | int32_t fi; /* for double indexing */ |
104 | int16_t ei; /* for double indexing */ |
105 | |
106 | /* Linked list */ |
107 | struct omap_type2_desc *t2_desc; |
108 | dma_addr_t t2_desc_paddr; |
109 | }; |
110 | |
111 | struct omap_desc { |
112 | struct virt_dma_desc vd; |
113 | bool using_ll; |
114 | enum dma_transfer_direction dir; |
115 | dma_addr_t dev_addr; |
116 | bool polled; |
117 | |
118 | int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */ |
119 | int16_t ei; /* for double indexing */ |
120 | uint8_t es; /* CSDP_DATA_TYPE_xxx */ |
121 | uint32_t ccr; /* CCR value */ |
122 | uint16_t clnk_ctrl; /* CLNK_CTRL value */ |
123 | uint16_t cicr; /* CICR value */ |
124 | uint32_t csdp; /* CSDP value */ |
125 | |
126 | unsigned sglen; |
127 | struct omap_sg sg[] __counted_by(sglen); |
128 | }; |
129 | |
130 | enum { |
131 | CAPS_0_SUPPORT_LL123 = BIT(20), /* Linked List type1/2/3 */ |
132 | CAPS_0_SUPPORT_LL4 = BIT(21), /* Linked List type4 */ |
133 | |
134 | CCR_FS = BIT(5), |
135 | CCR_READ_PRIORITY = BIT(6), |
136 | CCR_ENABLE = BIT(7), |
137 | CCR_AUTO_INIT = BIT(8), /* OMAP1 only */ |
138 | CCR_REPEAT = BIT(9), /* OMAP1 only */ |
139 | CCR_OMAP31_DISABLE = BIT(10), /* OMAP1 only */ |
140 | CCR_SUSPEND_SENSITIVE = BIT(8), /* OMAP2+ only */ |
141 | CCR_RD_ACTIVE = BIT(9), /* OMAP2+ only */ |
142 | CCR_WR_ACTIVE = BIT(10), /* OMAP2+ only */ |
143 | CCR_SRC_AMODE_CONSTANT = 0 << 12, |
144 | CCR_SRC_AMODE_POSTINC = 1 << 12, |
145 | CCR_SRC_AMODE_SGLIDX = 2 << 12, |
146 | CCR_SRC_AMODE_DBLIDX = 3 << 12, |
147 | CCR_DST_AMODE_CONSTANT = 0 << 14, |
148 | CCR_DST_AMODE_POSTINC = 1 << 14, |
149 | CCR_DST_AMODE_SGLIDX = 2 << 14, |
150 | CCR_DST_AMODE_DBLIDX = 3 << 14, |
151 | CCR_CONSTANT_FILL = BIT(16), |
152 | CCR_TRANSPARENT_COPY = BIT(17), |
153 | CCR_BS = BIT(18), |
154 | CCR_SUPERVISOR = BIT(22), |
155 | CCR_PREFETCH = BIT(23), |
156 | CCR_TRIGGER_SRC = BIT(24), |
157 | CCR_BUFFERING_DISABLE = BIT(25), |
158 | CCR_WRITE_PRIORITY = BIT(26), |
159 | CCR_SYNC_ELEMENT = 0, |
160 | CCR_SYNC_FRAME = CCR_FS, |
161 | CCR_SYNC_BLOCK = CCR_BS, |
162 | CCR_SYNC_PACKET = CCR_BS | CCR_FS, |
163 | |
164 | CSDP_DATA_TYPE_8 = 0, |
165 | CSDP_DATA_TYPE_16 = 1, |
166 | CSDP_DATA_TYPE_32 = 2, |
167 | CSDP_SRC_PORT_EMIFF = 0 << 2, /* OMAP1 only */ |
168 | CSDP_SRC_PORT_EMIFS = 1 << 2, /* OMAP1 only */ |
169 | CSDP_SRC_PORT_OCP_T1 = 2 << 2, /* OMAP1 only */ |
170 | CSDP_SRC_PORT_TIPB = 3 << 2, /* OMAP1 only */ |
171 | CSDP_SRC_PORT_OCP_T2 = 4 << 2, /* OMAP1 only */ |
172 | CSDP_SRC_PORT_MPUI = 5 << 2, /* OMAP1 only */ |
173 | CSDP_SRC_PACKED = BIT(6), |
174 | CSDP_SRC_BURST_1 = 0 << 7, |
175 | CSDP_SRC_BURST_16 = 1 << 7, |
176 | CSDP_SRC_BURST_32 = 2 << 7, |
177 | CSDP_SRC_BURST_64 = 3 << 7, |
178 | CSDP_DST_PORT_EMIFF = 0 << 9, /* OMAP1 only */ |
179 | CSDP_DST_PORT_EMIFS = 1 << 9, /* OMAP1 only */ |
180 | CSDP_DST_PORT_OCP_T1 = 2 << 9, /* OMAP1 only */ |
181 | CSDP_DST_PORT_TIPB = 3 << 9, /* OMAP1 only */ |
182 | CSDP_DST_PORT_OCP_T2 = 4 << 9, /* OMAP1 only */ |
183 | CSDP_DST_PORT_MPUI = 5 << 9, /* OMAP1 only */ |
184 | CSDP_DST_PACKED = BIT(13), |
185 | CSDP_DST_BURST_1 = 0 << 14, |
186 | CSDP_DST_BURST_16 = 1 << 14, |
187 | CSDP_DST_BURST_32 = 2 << 14, |
188 | CSDP_DST_BURST_64 = 3 << 14, |
189 | CSDP_WRITE_NON_POSTED = 0 << 16, |
190 | CSDP_WRITE_POSTED = 1 << 16, |
191 | CSDP_WRITE_LAST_NON_POSTED = 2 << 16, |
192 | |
193 | CICR_TOUT_IE = BIT(0), /* OMAP1 only */ |
194 | CICR_DROP_IE = BIT(1), |
195 | CICR_HALF_IE = BIT(2), |
196 | CICR_FRAME_IE = BIT(3), |
197 | CICR_LAST_IE = BIT(4), |
198 | CICR_BLOCK_IE = BIT(5), |
199 | CICR_PKT_IE = BIT(7), /* OMAP2+ only */ |
200 | CICR_TRANS_ERR_IE = BIT(8), /* OMAP2+ only */ |
201 | CICR_SUPERVISOR_ERR_IE = BIT(10), /* OMAP2+ only */ |
202 | CICR_MISALIGNED_ERR_IE = BIT(11), /* OMAP2+ only */ |
203 | CICR_DRAIN_IE = BIT(12), /* OMAP2+ only */ |
204 | CICR_SUPER_BLOCK_IE = BIT(14), /* OMAP2+ only */ |
205 | |
206 | CLNK_CTRL_ENABLE_LNK = BIT(15), |
207 | |
208 | CDP_DST_VALID_INC = 0 << 0, |
209 | CDP_DST_VALID_RELOAD = 1 << 0, |
210 | CDP_DST_VALID_REUSE = 2 << 0, |
211 | CDP_SRC_VALID_INC = 0 << 2, |
212 | CDP_SRC_VALID_RELOAD = 1 << 2, |
213 | CDP_SRC_VALID_REUSE = 2 << 2, |
214 | CDP_NTYPE_TYPE1 = 1 << 4, |
215 | CDP_NTYPE_TYPE2 = 2 << 4, |
216 | CDP_NTYPE_TYPE3 = 3 << 4, |
217 | CDP_TMODE_NORMAL = 0 << 8, |
218 | CDP_TMODE_LLIST = 1 << 8, |
219 | CDP_FAST = BIT(10), |
220 | }; |
221 | |
222 | static const unsigned es_bytes[] = { |
223 | [CSDP_DATA_TYPE_8] = 1, |
224 | [CSDP_DATA_TYPE_16] = 2, |
225 | [CSDP_DATA_TYPE_32] = 4, |
226 | }; |
227 | |
228 | static bool omap_dma_filter_fn(struct dma_chan *chan, void *param); |
229 | static struct of_dma_filter_info omap_dma_info = { |
230 | .filter_fn = omap_dma_filter_fn, |
231 | }; |
232 | |
233 | static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) |
234 | { |
235 | return container_of(d, struct omap_dmadev, ddev); |
236 | } |
237 | |
238 | static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) |
239 | { |
240 | return container_of(c, struct omap_chan, vc.chan); |
241 | } |
242 | |
243 | static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t) |
244 | { |
245 | return container_of(t, struct omap_desc, vd.tx); |
246 | } |
247 | |
248 | static void omap_dma_desc_free(struct virt_dma_desc *vd) |
249 | { |
	struct omap_desc *d = to_omap_dma_desc(&vd->tx);

	if (d->using_ll) {
		struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
		int i;

		for (i = 0; i < d->sglen; i++) {
			if (d->sg[i].t2_desc)
				dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
					      d->sg[i].t2_desc_paddr);
		}
	}

	kfree(d);
264 | } |
265 | |
266 | static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx, |
267 | enum dma_transfer_direction dir, bool last) |
268 | { |
269 | struct omap_sg *sg = &d->sg[idx]; |
270 | struct omap_type2_desc *t2_desc = sg->t2_desc; |
271 | |
272 | if (idx) |
273 | d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr; |
274 | if (last) |
275 | t2_desc->next_desc = 0xfffffffc; |
276 | |
277 | t2_desc->en = sg->en; |
278 | t2_desc->addr = sg->addr; |
279 | t2_desc->fn = sg->fn & 0xffff; |
280 | t2_desc->cicr = d->cicr; |
281 | if (!last) |
282 | t2_desc->cicr &= ~CICR_BLOCK_IE; |
283 | |
284 | switch (dir) { |
285 | case DMA_DEV_TO_MEM: |
286 | t2_desc->cdei = sg->ei; |
287 | t2_desc->csei = d->ei; |
288 | t2_desc->cdfi = sg->fi; |
289 | t2_desc->csfi = d->fi; |
290 | |
291 | t2_desc->en |= DESC_NXT_DV_REFRESH; |
292 | t2_desc->en |= DESC_NXT_SV_REUSE; |
293 | break; |
294 | case DMA_MEM_TO_DEV: |
295 | t2_desc->cdei = d->ei; |
296 | t2_desc->csei = sg->ei; |
297 | t2_desc->cdfi = d->fi; |
298 | t2_desc->csfi = sg->fi; |
299 | |
300 | t2_desc->en |= DESC_NXT_SV_REFRESH; |
301 | t2_desc->en |= DESC_NXT_DV_REUSE; |
302 | break; |
303 | default: |
304 | return; |
305 | } |
306 | |
307 | t2_desc->en |= DESC_NTYPE_TYPE2; |
308 | } |
309 | |
310 | static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr) |
311 | { |
312 | switch (type) { |
313 | case OMAP_DMA_REG_16BIT: |
314 | writew_relaxed(val, addr); |
315 | break; |
316 | case OMAP_DMA_REG_2X16BIT: |
317 | writew_relaxed(val, addr); |
318 | writew_relaxed(val >> 16, addr + 2); |
319 | break; |
320 | case OMAP_DMA_REG_32BIT: |
321 | writel_relaxed(val, addr); |
322 | break; |
323 | default: |
324 | WARN_ON(1); |
325 | } |
326 | } |
327 | |
328 | static unsigned omap_dma_read(unsigned type, void __iomem *addr) |
329 | { |
330 | unsigned val; |
331 | |
332 | switch (type) { |
333 | case OMAP_DMA_REG_16BIT: |
334 | val = readw_relaxed(addr); |
335 | break; |
336 | case OMAP_DMA_REG_2X16BIT: |
337 | val = readw_relaxed(addr); |
338 | val |= readw_relaxed(addr + 2) << 16; |
339 | break; |
340 | case OMAP_DMA_REG_32BIT: |
341 | val = readl_relaxed(addr); |
342 | break; |
343 | default: |
344 | WARN_ON(1); |
345 | val = 0; |
346 | } |
347 | |
348 | return val; |
349 | } |
350 | |
351 | static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val) |
352 | { |
353 | const struct omap_dma_reg *r = od->reg_map + reg; |
354 | |
355 | WARN_ON(r->stride); |
356 | |
	omap_dma_write(val, r->type, od->base + r->offset);
358 | } |
359 | |
360 | static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg) |
361 | { |
362 | const struct omap_dma_reg *r = od->reg_map + reg; |
363 | |
364 | WARN_ON(r->stride); |
365 | |
	return omap_dma_read(r->type, od->base + r->offset);
367 | } |
368 | |
369 | static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val) |
370 | { |
371 | const struct omap_dma_reg *r = c->reg_map + reg; |
372 | |
	omap_dma_write(val, r->type, c->channel_base + r->offset);
374 | } |
375 | |
376 | static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg) |
377 | { |
378 | const struct omap_dma_reg *r = c->reg_map + reg; |
379 | |
	return omap_dma_read(r->type, c->channel_base + r->offset);
381 | } |
382 | |
383 | static void omap_dma_clear_csr(struct omap_chan *c) |
384 | { |
385 | if (dma_omap1()) |
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
389 | } |
390 | |
391 | static unsigned omap_dma_get_csr(struct omap_chan *c) |
392 | { |
	unsigned val = omap_dma_chan_read(c, CSR);

	if (!dma_omap1())
		omap_dma_chan_write(c, CSR, val);
397 | |
398 | return val; |
399 | } |
400 | |
401 | static void omap_dma_clear_lch(struct omap_dmadev *od, int lch) |
402 | { |
403 | struct omap_chan *c; |
404 | int i; |
405 | |
406 | c = od->lch_map[lch]; |
407 | if (!c) |
408 | return; |
409 | |
410 | for (i = CSDP; i <= od->cfg->lch_end; i++) |
		omap_dma_chan_write(c, i, 0);
412 | } |
413 | |
414 | static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c, |
415 | unsigned lch) |
416 | { |
417 | c->channel_base = od->base + od->plat->channel_stride * lch; |
418 | |
419 | od->lch_map[lch] = c; |
420 | } |
421 | |
422 | static void omap_dma_start(struct omap_chan *c, struct omap_desc *d) |
423 | { |
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
425 | uint16_t cicr = d->cicr; |
426 | |
427 | if (__dma_omap15xx(od->plat->dma_attr)) |
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);
431 | |
432 | omap_dma_clear_csr(c); |
433 | |
434 | if (d->using_ll) { |
435 | uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST; |
436 | |
437 | if (d->dir == DMA_DEV_TO_MEM) |
438 | cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE); |
439 | else |
440 | cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD); |
		omap_dma_chan_write(c, CDP, cdp);

		omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
		omap_dma_chan_write(c, CCDN, 0);
		omap_dma_chan_write(c, CCFN, 0xffff);
		omap_dma_chan_write(c, CCEN, 0xffffff);

		cicr &= ~CICR_BLOCK_IE;
	} else if (od->ll123_supported) {
		omap_dma_chan_write(c, CDP, 0);
	}

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
458 | |
459 | c->running = true; |
460 | } |
461 | |
462 | static void omap_dma_drain_chan(struct omap_chan *c) |
463 | { |
464 | int i; |
465 | u32 val; |
466 | |
467 | /* Wait for sDMA FIFO to drain */ |
468 | for (i = 0; ; i++) { |
		val = omap_dma_chan_read(c, CCR);
470 | if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))) |
471 | break; |
472 | |
473 | if (i > 100) |
474 | break; |
475 | |
476 | udelay(5); |
477 | } |
478 | |
479 | if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)) |
480 | dev_err(c->vc.chan.device->dev, |
481 | "DMA drain did not complete on lch %d\n" , |
482 | c->dma_ch); |
483 | } |
484 | |
485 | static int omap_dma_stop(struct omap_chan *c) |
486 | { |
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
488 | uint32_t val; |
489 | |
490 | /* disable irq */ |
	omap_dma_chan_write(c, CICR, 0);
492 | |
493 | omap_dma_clear_csr(c); |
494 | |
	val = omap_dma_chan_read(c, CCR);
496 | if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) { |
497 | uint32_t sysconfig; |
498 | |
		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
512 | } else { |
513 | if (!(val & CCR_ENABLE)) |
514 | return -EINVAL; |
515 | |
516 | val &= ~CCR_ENABLE; |
		omap_dma_chan_write(c, CCR, val);
518 | |
519 | if (!(c->ccr & CCR_BUFFERING_DISABLE)) |
520 | omap_dma_drain_chan(c); |
521 | } |
522 | |
523 | mb(); |
524 | |
525 | if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) { |
		val = omap_dma_chan_read(c, CLNK_CTRL);
527 | |
528 | if (dma_omap1()) |
529 | val |= 1 << 14; /* set the STOP_LNK bit */ |
530 | else |
531 | val &= ~CLNK_CTRL_ENABLE_LNK; |
532 | |
		omap_dma_chan_write(c, CLNK_CTRL, val);
534 | } |
535 | c->running = false; |
536 | return 0; |
537 | } |
538 | |
539 | static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d) |
540 | { |
541 | struct omap_sg *sg = d->sg + c->sgidx; |
542 | unsigned cxsa, cxei, cxfi; |
543 | |
544 | if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) { |
545 | cxsa = CDSA; |
546 | cxei = CDEI; |
547 | cxfi = CDFI; |
548 | } else { |
549 | cxsa = CSSA; |
550 | cxei = CSEI; |
551 | cxfi = CSFI; |
552 | } |
553 | |
	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, sg->ei);
	omap_dma_chan_write(c, cxfi, sg->fi);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);
559 | |
560 | omap_dma_start(c, d); |
561 | c->sgidx++; |
562 | } |
563 | |
564 | static void omap_dma_start_desc(struct omap_chan *c) |
565 | { |
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
567 | struct omap_desc *d; |
568 | unsigned cxsa, cxei, cxfi; |
569 | |
570 | if (!vd) { |
571 | c->desc = NULL; |
572 | return; |
573 | } |
574 | |
	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
578 | c->sgidx = 0; |
579 | |
580 | /* |
581 | * This provides the necessary barrier to ensure data held in |
582 | * DMA coherent memory is visible to the DMA engine prior to |
583 | * the transfer starting. |
584 | */ |
585 | mb(); |
586 | |
	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);
590 | |
591 | if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) { |
592 | cxsa = CSSA; |
593 | cxei = CSEI; |
594 | cxfi = CSFI; |
595 | } else { |
596 | cxsa = CDSA; |
597 | cxei = CDEI; |
598 | cxfi = CDFI; |
599 | } |
600 | |
	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, d->ei);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
606 | |
607 | omap_dma_start_sg(c, d); |
608 | } |
609 | |
610 | static void omap_dma_callback(int ch, u16 status, void *data) |
611 | { |
612 | struct omap_chan *c = data; |
613 | struct omap_desc *d; |
614 | unsigned long flags; |
615 | |
616 | spin_lock_irqsave(&c->vc.lock, flags); |
617 | d = c->desc; |
618 | if (d) { |
619 | if (c->cyclic) { |
			vchan_cyclic_callback(&d->vd);
		} else if (d->using_ll || c->sgidx == d->sglen) {
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		} else {
			omap_dma_start_sg(c, d);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
629 | } |
630 | |
631 | static irqreturn_t omap_dma_irq(int irq, void *devid) |
632 | { |
633 | struct omap_dmadev *od = devid; |
634 | unsigned status, channel; |
635 | |
	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
642 | return IRQ_NONE; |
643 | } |
644 | |
645 | while ((channel = ffs(status)) != 0) { |
646 | unsigned mask, csr; |
647 | struct omap_chan *c; |
648 | |
649 | channel -= 1; |
650 | mask = BIT(channel); |
651 | status &= ~mask; |
652 | |
653 | c = od->lch_map[channel]; |
654 | if (c == NULL) { |
655 | /* This should never happen */ |
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
657 | continue; |
658 | } |
659 | |
660 | csr = omap_dma_get_csr(c); |
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
664 | } |
665 | |
	spin_unlock(&od->irq_lock);
667 | |
668 | return IRQ_HANDLED; |
669 | } |
670 | |
671 | static int omap_dma_get_lch(struct omap_dmadev *od, int *lch) |
672 | { |
673 | int channel; |
674 | |
675 | mutex_lock(&od->lch_lock); |
	channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
	if (channel >= od->lch_count)
		goto out_busy;
	set_bit(channel, od->lch_bitmap);
	mutex_unlock(&od->lch_lock);

	omap_dma_clear_lch(od, channel);
683 | *lch = channel; |
684 | |
685 | return 0; |
686 | |
687 | out_busy: |
	mutex_unlock(&od->lch_lock);
689 | *lch = -EINVAL; |
690 | |
691 | return -EBUSY; |
692 | } |
693 | |
694 | static void omap_dma_put_lch(struct omap_dmadev *od, int lch) |
695 | { |
696 | omap_dma_clear_lch(od, lch); |
697 | mutex_lock(&od->lch_lock); |
	clear_bit(lch, od->lch_bitmap);
	mutex_unlock(&od->lch_lock);
700 | } |
701 | |
702 | static inline bool omap_dma_legacy(struct omap_dmadev *od) |
703 | { |
704 | return IS_ENABLED(CONFIG_ARCH_OMAP1) && od->legacy; |
705 | } |
706 | |
707 | static int omap_dma_alloc_chan_resources(struct dma_chan *chan) |
708 | { |
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
711 | struct device *dev = od->ddev.dev; |
712 | int ret; |
713 | |
714 | if (omap_dma_legacy(od)) { |
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_dma_get_lch(od, &c->dma_ch);
719 | } |
720 | |
	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);
722 | |
723 | if (ret >= 0) { |
		omap_dma_assign(od, c, c->dma_ch);
725 | |
726 | if (!omap_dma_legacy(od)) { |
727 | unsigned val; |
728 | |
			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
739 | } |
740 | } |
741 | |
742 | if (dma_omap1()) { |
743 | if (__dma_omap16xx(od->plat->dma_attr)) { |
744 | c->ccr = CCR_OMAP31_DISABLE; |
745 | /* Duplicate what plat-omap/dma.c does */ |
746 | c->ccr |= c->dma_ch + 1; |
747 | } else { |
748 | c->ccr = c->dma_sig & 0x1f; |
749 | } |
750 | } else { |
751 | c->ccr = c->dma_sig & 0x1f; |
752 | c->ccr |= (c->dma_sig & ~0x1f) << 14; |
753 | } |
754 | if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING) |
755 | c->ccr |= CCR_BUFFERING_DISABLE; |
756 | |
757 | return ret; |
758 | } |
759 | |
760 | static void omap_dma_free_chan_resources(struct dma_chan *chan) |
761 | { |
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
764 | |
765 | if (!omap_dma_legacy(od)) { |
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
770 | } |
771 | |
772 | c->channel_base = NULL; |
773 | od->lch_map[c->dma_ch] = NULL; |
	vchan_free_chan_resources(&c->vc);
775 | |
776 | if (omap_dma_legacy(od)) |
		omap_free_dma(c->dma_ch);
	else
		omap_dma_put_lch(od, c->dma_ch);
780 | |
	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
782 | c->dma_sig); |
783 | c->dma_sig = 0; |
784 | } |
785 | |
786 | static size_t omap_dma_sg_size(struct omap_sg *sg) |
787 | { |
788 | return sg->en * sg->fn; |
789 | } |
790 | |
791 | static size_t omap_dma_desc_size(struct omap_desc *d) |
792 | { |
793 | unsigned i; |
794 | size_t size; |
795 | |
796 | for (size = i = 0; i < d->sglen; i++) |
		size += omap_dma_sg_size(&d->sg[i]);
798 | |
799 | return size * es_bytes[d->es]; |
800 | } |
801 | |
802 | static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) |
803 | { |
804 | unsigned i; |
805 | size_t size, es_size = es_bytes[d->es]; |
806 | |
807 | for (size = i = 0; i < d->sglen; i++) { |
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
809 | |
810 | if (size) |
811 | size += this_size; |
812 | else if (addr >= d->sg[i].addr && |
813 | addr < d->sg[i].addr + this_size) |
814 | size += d->sg[i].addr + this_size - addr; |
815 | } |
816 | return size; |
817 | } |
818 | |
819 | /* |
820 | * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is |
821 | * read before the DMA controller finished disabling the channel. |
822 | */ |
823 | static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg) |
824 | { |
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
826 | uint32_t val; |
827 | |
828 | val = omap_dma_chan_read(c, reg); |
829 | if (val == 0 && od->plat->errata & DMA_ERRATA_3_3) |
830 | val = omap_dma_chan_read(c, reg); |
831 | |
832 | return val; |
833 | } |
834 | |
835 | static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c) |
836 | { |
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
838 | dma_addr_t addr, cdac; |
839 | |
840 | if (__dma_omap15xx(od->plat->dma_attr)) { |
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);
845 | |
846 | /* |
847 | * CDAC == 0 indicates that the DMA transfer on the channel has |
848 | * not been started (no data has been transferred so far). |
849 | * Return the programmed source start address in this case. |
850 | */ |
851 | if (cdac == 0) |
			addr = omap_dma_chan_read(c, CSSA);
853 | } |
854 | |
855 | if (dma_omap1()) |
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;
857 | |
858 | return addr; |
859 | } |
860 | |
861 | static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c) |
862 | { |
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
864 | dma_addr_t addr; |
865 | |
866 | if (__dma_omap15xx(od->plat->dma_attr)) { |
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);
870 | |
871 | /* |
872 | * CDAC == 0 indicates that the DMA transfer on the channel |
873 | * has not been started (no data has been transferred so |
874 | * far). Return the programmed destination start address in |
875 | * this case. |
876 | */ |
877 | if (addr == 0) |
			addr = omap_dma_chan_read(c, CDSA);
879 | } |
880 | |
881 | if (dma_omap1()) |
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;
883 | |
884 | return addr; |
885 | } |
886 | |
887 | static enum dma_status omap_dma_tx_status(struct dma_chan *chan, |
888 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
889 | { |
	struct omap_chan *c = to_omap_dma_chan(chan);
891 | enum dma_status ret; |
892 | unsigned long flags; |
893 | struct omap_desc *d = NULL; |
894 | |
	ret = dma_cookie_status(chan, cookie, txstate);
896 | if (ret == DMA_COMPLETE) |
897 | return ret; |
898 | |
899 | spin_lock_irqsave(&c->vc.lock, flags); |
900 | if (c->desc && c->desc->vd.tx.cookie == cookie) |
901 | d = c->desc; |
902 | |
903 | if (!txstate) |
904 | goto out; |
905 | |
906 | if (d) { |
907 | dma_addr_t pos; |
908 | |
909 | if (d->dir == DMA_MEM_TO_DEV) |
910 | pos = omap_dma_get_src_pos(c); |
911 | else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) |
912 | pos = omap_dma_get_dst_pos(c); |
913 | else |
914 | pos = 0; |
915 | |
		txstate->residue = omap_dma_desc_size_pos(d, pos);
917 | } else { |
918 | struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie); |
919 | |
920 | if (vd) |
			txstate->residue = omap_dma_desc_size(
				to_omap_dma_desc(&vd->tx));
923 | else |
924 | txstate->residue = 0; |
925 | } |
926 | |
927 | out: |
928 | if (ret == DMA_IN_PROGRESS && c->paused) { |
929 | ret = DMA_PAUSED; |
930 | } else if (d && d->polled && c->running) { |
		uint32_t ccr = omap_dma_chan_read(c, CCR);
932 | /* |
933 | * The channel is no longer active, set the return value |
934 | * accordingly and mark it as completed |
935 | */ |
936 | if (!(ccr & CCR_ENABLE)) { |
937 | ret = DMA_COMPLETE; |
938 | omap_dma_start_desc(c); |
			vchan_cookie_complete(&d->vd);
940 | } |
941 | } |
942 | |
	spin_unlock_irqrestore(&c->vc.lock, flags);
944 | |
945 | return ret; |
946 | } |
947 | |
948 | static void omap_dma_issue_pending(struct dma_chan *chan) |
949 | { |
	struct omap_chan *c = to_omap_dma_chan(chan);
951 | unsigned long flags; |
952 | |
953 | spin_lock_irqsave(&c->vc.lock, flags); |
	if (vchan_issue_pending(&c->vc) && !c->desc)
		omap_dma_start_desc(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);
957 | } |
958 | |
959 | static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( |
960 | struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, |
961 | enum dma_transfer_direction dir, unsigned long tx_flags, void *context) |
962 | { |
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
965 | enum dma_slave_buswidth dev_width; |
966 | struct scatterlist *sgent; |
967 | struct omap_desc *d; |
968 | dma_addr_t dev_addr; |
969 | unsigned i, es, en, frame_bytes; |
970 | bool ll_failed = false; |
971 | u32 burst; |
972 | u32 port_window, port_window_bytes; |
973 | |
974 | if (dir == DMA_DEV_TO_MEM) { |
975 | dev_addr = c->cfg.src_addr; |
976 | dev_width = c->cfg.src_addr_width; |
977 | burst = c->cfg.src_maxburst; |
978 | port_window = c->cfg.src_port_window_size; |
979 | } else if (dir == DMA_MEM_TO_DEV) { |
980 | dev_addr = c->cfg.dst_addr; |
981 | dev_width = c->cfg.dst_addr_width; |
982 | burst = c->cfg.dst_maxburst; |
983 | port_window = c->cfg.dst_port_window_size; |
984 | } else { |
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
986 | return NULL; |
987 | } |
988 | |
989 | /* Bus width translates to the element size (ES) */ |
990 | switch (dev_width) { |
991 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
992 | es = CSDP_DATA_TYPE_8; |
993 | break; |
994 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
995 | es = CSDP_DATA_TYPE_16; |
996 | break; |
997 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
998 | es = CSDP_DATA_TYPE_32; |
999 | break; |
1000 | default: /* not reached */ |
1001 | return NULL; |
1002 | } |
1003 | |
1004 | /* Now allocate and setup the descriptor. */ |
1005 | d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC); |
1006 | if (!d) |
1007 | return NULL; |
1008 | d->sglen = sglen; |
1009 | |
1010 | d->dir = dir; |
1011 | d->dev_addr = dev_addr; |
1012 | d->es = es; |
1013 | |
1014 | /* When the port_window is used, one frame must cover the window */ |
1015 | if (port_window) { |
1016 | burst = port_window; |
1017 | port_window_bytes = port_window * es_bytes[es]; |
1018 | |
1019 | d->ei = 1; |
1020 | /* |
1021 | * One frame covers the port_window and by configure |
1022 | * the source frame index to be -1 * (port_window - 1) |
1023 | * we instruct the sDMA that after a frame is processed |
1024 | * it should move back to the start of the window. |
1025 | */ |
1026 | d->fi = -(port_window_bytes - 1); |
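		/*
		 * E.g. (illustrative): a 16-element window of 32-bit
		 * elements gives port_window_bytes = 64, hence
		 * d->fi = -63.
		 */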
1027 | } |
1028 | |
1029 | d->ccr = c->ccr | CCR_SYNC_FRAME; |
1030 | if (dir == DMA_DEV_TO_MEM) { |
1031 | d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED; |
1032 | |
1033 | d->ccr |= CCR_DST_AMODE_POSTINC; |
1034 | if (port_window) { |
1035 | d->ccr |= CCR_SRC_AMODE_DBLIDX; |
1036 | |
1037 | if (port_window_bytes >= 64) |
1038 | d->csdp |= CSDP_SRC_BURST_64; |
1039 | else if (port_window_bytes >= 32) |
1040 | d->csdp |= CSDP_SRC_BURST_32; |
1041 | else if (port_window_bytes >= 16) |
1042 | d->csdp |= CSDP_SRC_BURST_16; |
1043 | |
1044 | } else { |
1045 | d->ccr |= CCR_SRC_AMODE_CONSTANT; |
1046 | } |
1047 | } else { |
1048 | d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED; |
1049 | |
1050 | d->ccr |= CCR_SRC_AMODE_POSTINC; |
1051 | if (port_window) { |
1052 | d->ccr |= CCR_DST_AMODE_DBLIDX; |
1053 | |
1054 | if (port_window_bytes >= 64) |
1055 | d->csdp |= CSDP_DST_BURST_64; |
1056 | else if (port_window_bytes >= 32) |
1057 | d->csdp |= CSDP_DST_BURST_32; |
1058 | else if (port_window_bytes >= 16) |
1059 | d->csdp |= CSDP_DST_BURST_16; |
1060 | } else { |
1061 | d->ccr |= CCR_DST_AMODE_CONSTANT; |
1062 | } |
1063 | } |
1064 | |
1065 | d->cicr = CICR_DROP_IE | CICR_BLOCK_IE; |
1066 | d->csdp |= es; |
1067 | |
1068 | if (dma_omap1()) { |
1069 | d->cicr |= CICR_TOUT_IE; |
1070 | |
1071 | if (dir == DMA_DEV_TO_MEM) |
1072 | d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB; |
1073 | else |
1074 | d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF; |
1075 | } else { |
1076 | if (dir == DMA_DEV_TO_MEM) |
1077 | d->ccr |= CCR_TRIGGER_SRC; |
1078 | |
1079 | d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; |
1080 | |
1081 | if (port_window) |
1082 | d->csdp |= CSDP_WRITE_LAST_NON_POSTED; |
1083 | } |
1084 | if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS) |
1085 | d->clnk_ctrl = c->dma_ch; |
1086 | |
1087 | /* |
1088 | * Build our scatterlist entries: each contains the address, |
1089 | * the number of elements (EN) in each frame, and the number of |
1090 | * frames (FN). Number of bytes for this entry = ES * EN * FN. |
1091 | * |
1092 | * Burst size translates to number of elements with frame sync. |
1093 | * Note: DMA engine defines burst to be the number of dev-width |
1094 | * transfers. |
1095 | */ |
1096 | en = burst; |
1097 | frame_bytes = es_bytes[es] * en; |
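	/*
	 * E.g. (illustrative): a 16-bit device with maxburst = 32 gives
	 * en = 32 and frame_bytes = 64, so a 4096-byte sg entry is
	 * programmed as fn = 64 frames.
	 */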
1098 | |
1099 | if (sglen >= 2) |
1100 | d->using_ll = od->ll123_supported; |
1101 | |
1102 | for_each_sg(sgl, sgent, sglen, i) { |
1103 | struct omap_sg *osg = &d->sg[i]; |
1104 | |
1105 | osg->addr = sg_dma_address(sgent); |
1106 | osg->en = en; |
1107 | osg->fn = sg_dma_len(sgent) / frame_bytes; |
1108 | |
1109 | if (d->using_ll) { |
			osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
						      &osg->t2_desc_paddr);
1112 | if (!osg->t2_desc) { |
1113 | dev_err(chan->device->dev, |
1114 | "t2_desc[%d] allocation failed\n" , i); |
1115 | ll_failed = true; |
1116 | d->using_ll = false; |
1117 | continue; |
1118 | } |
1119 | |
			omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
1121 | } |
1122 | } |
1123 | |
1124 | /* Release the dma_pool entries if one allocation failed */ |
1125 | if (ll_failed) { |
1126 | for (i = 0; i < d->sglen; i++) { |
1127 | struct omap_sg *osg = &d->sg[i]; |
1128 | |
1129 | if (osg->t2_desc) { |
				dma_pool_free(od->desc_pool, osg->t2_desc,
					      osg->t2_desc_paddr);
1132 | osg->t2_desc = NULL; |
1133 | } |
1134 | } |
1135 | } |
1136 | |
	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
1138 | } |
1139 | |
1140 | static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( |
1141 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
1142 | size_t period_len, enum dma_transfer_direction dir, unsigned long flags) |
1143 | { |
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
1146 | enum dma_slave_buswidth dev_width; |
1147 | struct omap_desc *d; |
1148 | dma_addr_t dev_addr; |
1149 | unsigned es; |
1150 | u32 burst; |
1151 | |
1152 | if (dir == DMA_DEV_TO_MEM) { |
1153 | dev_addr = c->cfg.src_addr; |
1154 | dev_width = c->cfg.src_addr_width; |
1155 | burst = c->cfg.src_maxburst; |
1156 | } else if (dir == DMA_MEM_TO_DEV) { |
1157 | dev_addr = c->cfg.dst_addr; |
1158 | dev_width = c->cfg.dst_addr_width; |
1159 | burst = c->cfg.dst_maxburst; |
1160 | } else { |
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
1162 | return NULL; |
1163 | } |
1164 | |
1165 | /* Bus width translates to the element size (ES) */ |
1166 | switch (dev_width) { |
1167 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
1168 | es = CSDP_DATA_TYPE_8; |
1169 | break; |
1170 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
1171 | es = CSDP_DATA_TYPE_16; |
1172 | break; |
1173 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
1174 | es = CSDP_DATA_TYPE_32; |
1175 | break; |
1176 | default: /* not reached */ |
1177 | return NULL; |
1178 | } |
1179 | |
1180 | /* Now allocate and setup the descriptor. */ |
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
1182 | if (!d) |
1183 | return NULL; |
1184 | |
1185 | d->dir = dir; |
1186 | d->dev_addr = dev_addr; |
1187 | d->fi = burst; |
1188 | d->es = es; |
1189 | d->sg[0].addr = buf_addr; |
1190 | d->sg[0].en = period_len / es_bytes[es]; |
1191 | d->sg[0].fn = buf_len / period_len; |
1192 | d->sglen = 1; |
1193 | |
1194 | d->ccr = c->ccr; |
1195 | if (dir == DMA_DEV_TO_MEM) |
1196 | d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT; |
1197 | else |
1198 | d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC; |
1199 | |
1200 | d->cicr = CICR_DROP_IE; |
1201 | if (flags & DMA_PREP_INTERRUPT) |
1202 | d->cicr |= CICR_FRAME_IE; |
1203 | |
1204 | d->csdp = es; |
1205 | |
1206 | if (dma_omap1()) { |
1207 | d->cicr |= CICR_TOUT_IE; |
1208 | |
1209 | if (dir == DMA_DEV_TO_MEM) |
1210 | d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI; |
1211 | else |
1212 | d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF; |
1213 | } else { |
1214 | if (burst) |
1215 | d->ccr |= CCR_SYNC_PACKET; |
1216 | else |
1217 | d->ccr |= CCR_SYNC_ELEMENT; |
1218 | |
1219 | if (dir == DMA_DEV_TO_MEM) { |
1220 | d->ccr |= CCR_TRIGGER_SRC; |
1221 | d->csdp |= CSDP_DST_PACKED; |
1222 | } else { |
1223 | d->csdp |= CSDP_SRC_PACKED; |
1224 | } |
1225 | |
1226 | d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; |
1227 | |
1228 | d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; |
1229 | } |
1230 | |
1231 | if (__dma_omap15xx(od->plat->dma_attr)) |
1232 | d->ccr |= CCR_AUTO_INIT | CCR_REPEAT; |
1233 | else |
1234 | d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK; |
1235 | |
1236 | c->cyclic = true; |
1237 | |
	return vchan_tx_prep(&c->vc, &d->vd, flags);
1239 | } |
1240 | |
1241 | static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy( |
1242 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
1243 | size_t len, unsigned long tx_flags) |
1244 | { |
	struct omap_chan *c = to_omap_dma_chan(chan);
1246 | struct omap_desc *d; |
1247 | uint8_t data_type; |
1248 | |
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
1250 | if (!d) |
1251 | return NULL; |
1252 | |
1253 | data_type = __ffs((src | dest | len)); |
1254 | if (data_type > CSDP_DATA_TYPE_32) |
1255 | data_type = CSDP_DATA_TYPE_32; |
1256 | |
1257 | d->dir = DMA_MEM_TO_MEM; |
1258 | d->dev_addr = src; |
1259 | d->fi = 0; |
1260 | d->es = data_type; |
1261 | d->sg[0].en = len / BIT(data_type); |
1262 | d->sg[0].fn = 1; |
1263 | d->sg[0].addr = dest; |
1264 | d->sglen = 1; |
1265 | d->ccr = c->ccr; |
1266 | d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC; |
1267 | |
1268 | if (tx_flags & DMA_PREP_INTERRUPT) |
1269 | d->cicr |= CICR_FRAME_IE; |
1270 | else |
1271 | d->polled = true; |
1272 | |
1273 | d->csdp = data_type; |
1274 | |
1275 | if (dma_omap1()) { |
1276 | d->cicr |= CICR_TOUT_IE; |
1277 | d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF; |
1278 | } else { |
1279 | d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED; |
1280 | d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; |
1281 | d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; |
1282 | } |
1283 | |
	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
1285 | } |
1286 | |
1287 | static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved( |
1288 | struct dma_chan *chan, struct dma_interleaved_template *xt, |
1289 | unsigned long flags) |
1290 | { |
	struct omap_chan *c = to_omap_dma_chan(chan);
1292 | struct omap_desc *d; |
1293 | struct omap_sg *sg; |
1294 | uint8_t data_type; |
1295 | size_t src_icg, dst_icg; |
1296 | |
1297 | /* Slave mode is not supported */ |
	if (is_slave_direction(xt->dir))
1299 | return NULL; |
1300 | |
1301 | if (xt->frame_size != 1 || xt->numf == 0) |
1302 | return NULL; |
1303 | |
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
1305 | if (!d) |
1306 | return NULL; |
1307 | |
1308 | data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size)); |
1309 | if (data_type > CSDP_DATA_TYPE_32) |
1310 | data_type = CSDP_DATA_TYPE_32; |
1311 | |
1312 | sg = &d->sg[0]; |
1313 | d->dir = DMA_MEM_TO_MEM; |
1314 | d->dev_addr = xt->src_start; |
1315 | d->es = data_type; |
1316 | sg->en = xt->sgl[0].size / BIT(data_type); |
1317 | sg->fn = xt->numf; |
1318 | sg->addr = xt->dst_start; |
1319 | d->sglen = 1; |
1320 | d->ccr = c->ccr; |
1321 | |
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
1324 | if (src_icg) { |
1325 | d->ccr |= CCR_SRC_AMODE_DBLIDX; |
1326 | d->ei = 1; |
1327 | d->fi = src_icg + 1; |
1328 | } else if (xt->src_inc) { |
1329 | d->ccr |= CCR_SRC_AMODE_POSTINC; |
1330 | d->fi = 0; |
1331 | } else { |
1332 | dev_err(chan->device->dev, |
1333 | "%s: SRC constant addressing is not supported\n" , |
1334 | __func__); |
1335 | kfree(objp: d); |
1336 | return NULL; |
1337 | } |
1338 | |
1339 | if (dst_icg) { |
1340 | d->ccr |= CCR_DST_AMODE_DBLIDX; |
1341 | sg->ei = 1; |
1342 | sg->fi = dst_icg + 1; |
1343 | } else if (xt->dst_inc) { |
1344 | d->ccr |= CCR_DST_AMODE_POSTINC; |
1345 | sg->fi = 0; |
1346 | } else { |
1347 | dev_err(chan->device->dev, |
1348 | "%s: DST constant addressing is not supported\n" , |
1349 | __func__); |
1350 | kfree(objp: d); |
1351 | return NULL; |
1352 | } |
1353 | |
1354 | d->cicr = CICR_DROP_IE | CICR_FRAME_IE; |
1355 | |
1356 | d->csdp = data_type; |
1357 | |
1358 | if (dma_omap1()) { |
1359 | d->cicr |= CICR_TOUT_IE; |
1360 | d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF; |
1361 | } else { |
1362 | d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED; |
1363 | d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; |
1364 | d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; |
1365 | } |
1366 | |
	return vchan_tx_prep(&c->vc, &d->vd, flags);
1368 | } |
1369 | |
1370 | static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) |
1371 | { |
	struct omap_chan *c = to_omap_dma_chan(chan);
1373 | |
1374 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || |
1375 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) |
1376 | return -EINVAL; |
1377 | |
1378 | if (cfg->src_maxburst > chan->device->max_burst || |
1379 | cfg->dst_maxburst > chan->device->max_burst) |
1380 | return -EINVAL; |
1381 | |
1382 | memcpy(&c->cfg, cfg, sizeof(c->cfg)); |
1383 | |
1384 | return 0; |
1385 | } |
1386 | |
1387 | static int omap_dma_terminate_all(struct dma_chan *chan) |
1388 | { |
	struct omap_chan *c = to_omap_dma_chan(chan);
1390 | unsigned long flags; |
1391 | LIST_HEAD(head); |
1392 | |
1393 | spin_lock_irqsave(&c->vc.lock, flags); |
1394 | |
1395 | /* |
1396 | * Stop DMA activity: we assume the callback will not be called |
1397 | * after omap_dma_stop() returns (even if it does, it will see |
1398 | * c->desc is NULL and exit.) |
1399 | */ |
1400 | if (c->desc) { |
		vchan_terminate_vdesc(&c->desc->vd);
1402 | c->desc = NULL; |
1403 | /* Avoid stopping the dma twice */ |
1404 | if (!c->paused) |
1405 | omap_dma_stop(c); |
1406 | } |
1407 | |
1408 | c->cyclic = false; |
1409 | c->paused = false; |
1410 | |
	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);
1414 | |
1415 | return 0; |
1416 | } |
1417 | |
1418 | static void omap_dma_synchronize(struct dma_chan *chan) |
1419 | { |
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_synchronize(&c->vc);
1423 | } |
1424 | |
1425 | static int omap_dma_pause(struct dma_chan *chan) |
1426 | { |
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1429 | unsigned long flags; |
1430 | int ret = -EINVAL; |
1431 | bool can_pause = false; |
1432 | |
1433 | spin_lock_irqsave(&od->irq_lock, flags); |
1434 | |
1435 | if (!c->desc) |
1436 | goto out; |
1437 | |
1438 | if (c->cyclic) |
1439 | can_pause = true; |
1440 | |
1441 | /* |
1442 | * We do not allow DMA_MEM_TO_DEV transfers to be paused. |
1443 | * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer: |
1444 | * "When a channel is disabled during a transfer, the channel undergoes |
1445 | * an abort, unless it is hardware-source-synchronized …". |
1446 | * A source-synchronised channel is one where the fetching of data is |
1447 | * under control of the device. In other words, a device-to-memory |
1448 | * transfer. So, a destination-synchronised channel (which would be a |
1449 | * memory-to-device transfer) undergoes an abort if the CCR_ENABLE |
1450 | * bit is cleared. |
1451 | * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel |
1452 | * aborts immediately after completion of current read/write |
1453 | * transactions and then the FIFO is cleaned up." The term "cleaned up" |
1454 | * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE |
1455 | * are both clear _before_ disabling the channel, otherwise data loss |
1456 | * will occur. |
1457 | * The problem is that if the channel is active, then device activity |
1458 | * can result in DMA activity starting between reading those as both |
1459 | * clear and the write to DMA_CCR to clear the enable bit hitting the |
1460 | * hardware. If the DMA hardware can't drain the data in its FIFO to the |
1461 | * destination, then data loss "might" occur (say if we write to an UART |
1462 | * and the UART is not accepting any further data). |
1463 | */ |
1464 | else if (c->desc->dir == DMA_DEV_TO_MEM) |
1465 | can_pause = true; |
1466 | |
1467 | if (can_pause && !c->paused) { |
1468 | ret = omap_dma_stop(c); |
1469 | if (!ret) |
1470 | c->paused = true; |
1471 | } |
1472 | out: |
	spin_unlock_irqrestore(&od->irq_lock, flags);
1474 | |
1475 | return ret; |
1476 | } |
1477 | |
1478 | static int omap_dma_resume(struct dma_chan *chan) |
1479 | { |
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1482 | unsigned long flags; |
1483 | int ret = -EINVAL; |
1484 | |
1485 | spin_lock_irqsave(&od->irq_lock, flags); |
1486 | |
1487 | if (c->paused && c->desc) { |
1488 | mb(); |
1489 | |
1490 | /* Restore channel link register */ |
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
1494 | c->paused = false; |
1495 | ret = 0; |
1496 | } |
	spin_unlock_irqrestore(&od->irq_lock, flags);
1498 | |
1499 | return ret; |
1500 | } |
1501 | |
1502 | static int omap_dma_chan_init(struct omap_dmadev *od) |
1503 | { |
1504 | struct omap_chan *c; |
1505 | |
	c = kzalloc(sizeof(*c), GFP_KERNEL);
1507 | if (!c) |
1508 | return -ENOMEM; |
1509 | |
1510 | c->reg_map = od->reg_map; |
1511 | c->vc.desc_free = omap_dma_desc_free; |
	vchan_init(&c->vc, &od->ddev);
1513 | |
1514 | return 0; |
1515 | } |
1516 | |
1517 | static void omap_dma_free(struct omap_dmadev *od) |
1518 | { |
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
1526 | } |
1527 | } |
1528 | |
1529 | /* Currently used by omap2 & 3 to block deeper SoC idle states */ |
1530 | static bool omap_dma_busy(struct omap_dmadev *od) |
1531 | { |
1532 | struct omap_chan *c; |
1533 | int lch = -1; |
1534 | |
1535 | while (1) { |
		lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
1537 | if (lch >= od->lch_count) |
1538 | break; |
1539 | c = od->lch_map[lch]; |
1540 | if (!c) |
1541 | continue; |
		if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
1543 | return true; |
1544 | } |
1545 | |
1546 | return false; |
1547 | } |
1548 | |
1549 | /* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */ |
1550 | static int omap_dma_busy_notifier(struct notifier_block *nb, |
1551 | unsigned long cmd, void *v) |
1552 | { |
1553 | struct omap_dmadev *od; |
1554 | |
1555 | od = container_of(nb, struct omap_dmadev, nb); |
1556 | |
1557 | switch (cmd) { |
1558 | case CPU_CLUSTER_PM_ENTER: |
1559 | if (omap_dma_busy(od)) |
1560 | return NOTIFY_BAD; |
1561 | break; |
1562 | case CPU_CLUSTER_PM_ENTER_FAILED: |
1563 | case CPU_CLUSTER_PM_EXIT: |
1564 | break; |
1565 | } |
1566 | |
1567 | return NOTIFY_OK; |
1568 | } |
1569 | |
1570 | /* |
1571 | * We are using IRQENABLE_L1, and legacy DMA code was using IRQENABLE_L0. |
1572 | * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for |
1573 | * now. Context save seems to be only currently needed on omap3. |
1574 | */ |
1575 | static void omap_dma_context_save(struct omap_dmadev *od) |
1576 | { |
	od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
	od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
	od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
	od->context.gcr = omap_dma_glbl_read(od, GCR);
1581 | } |
1582 | |
1583 | static void omap_dma_context_restore(struct omap_dmadev *od) |
1584 | { |
1585 | int i; |
1586 | |
	omap_dma_glbl_write(od, GCR, od->context.gcr);
	omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
	omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
	omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);
1591 | |
1592 | /* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */ |
1593 | if (od->plat->errata & DMA_ROMCODE_BUG) |
		omap_dma_glbl_write(od, IRQSTATUS_L0, 0);
1595 | |
1596 | /* Clear dma channels */ |
1597 | for (i = 0; i < od->lch_count; i++) |
		omap_dma_clear_lch(od, i);
1599 | } |
1600 | |
1601 | /* Currently only used for omap3 */ |
1602 | static int omap_dma_context_notifier(struct notifier_block *nb, |
1603 | unsigned long cmd, void *v) |
1604 | { |
1605 | struct omap_dmadev *od; |
1606 | |
1607 | od = container_of(nb, struct omap_dmadev, nb); |
1608 | |
1609 | switch (cmd) { |
1610 | case CPU_CLUSTER_PM_ENTER: |
1611 | if (omap_dma_busy(od)) |
1612 | return NOTIFY_BAD; |
1613 | omap_dma_context_save(od); |
1614 | break; |
1615 | case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */ |
1616 | break; |
1617 | case CPU_CLUSTER_PM_EXIT: |
1618 | omap_dma_context_restore(od); |
1619 | break; |
1620 | } |
1621 | |
1622 | return NOTIFY_OK; |
1623 | } |
1624 | |
1625 | static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate, |
1626 | int max_fifo_depth, int tparams) |
1627 | { |
1628 | u32 val; |
1629 | |
1630 | /* Set only for omap2430 and later */ |
1631 | if (!od->cfg->rw_priority) |
1632 | return; |
1633 | |
1634 | if (max_fifo_depth == 0) |
1635 | max_fifo_depth = 1; |
1636 | if (arb_rate == 0) |
1637 | arb_rate = 1; |
1638 | |
1639 | val = 0xff & max_fifo_depth; |
1640 | val |= (0x3 & tparams) << 12; |
1641 | val |= (arb_rate & 0xff) << 16; |
1642 | |
	omap_dma_glbl_write(od, GCR, val);
1644 | } |
1645 | |
1646 | #define OMAP_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ |
1647 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ |
1648 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) |
1649 | |
1650 | /* |
1651 | * No flags currently set for default configuration as omap1 is still |
1652 | * using platform data. |
1653 | */ |
1654 | static const struct omap_dma_config default_cfg; |
1655 | |
1656 | static int omap_dma_probe(struct platform_device *pdev) |
1657 | { |
1658 | const struct omap_dma_config *conf; |
1659 | struct omap_dmadev *od; |
1660 | int rc, i, irq; |
1661 | u32 val; |
1662 | |
	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
1664 | if (!od) |
1665 | return -ENOMEM; |
1666 | |
	od->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);
1670 | |
	conf = of_device_get_match_data(&pdev->dev);
1672 | if (conf) { |
1673 | od->cfg = conf; |
		od->plat = dev_get_platdata(&pdev->dev);
		if (!od->plat) {
			dev_err(&pdev->dev, "omap_system_dma_plat_info is missing");
1677 | return -ENODEV; |
1678 | } |
1679 | } else if (IS_ENABLED(CONFIG_ARCH_OMAP1)) { |
1680 | od->cfg = &default_cfg; |
1681 | |
1682 | od->plat = omap_get_plat_info(); |
1683 | if (!od->plat) |
1684 | return -EPROBE_DEFER; |
1685 | } else { |
1686 | return -ENODEV; |
1687 | } |
1688 | |
1689 | od->reg_map = od->plat->reg_map; |
1690 | |
1691 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); |
1692 | dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); |
1693 | dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask); |
1694 | dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask); |
1695 | od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; |
1696 | od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; |
1697 | od->ddev.device_tx_status = omap_dma_tx_status; |
1698 | od->ddev.device_issue_pending = omap_dma_issue_pending; |
1699 | od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; |
1700 | od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; |
1701 | od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy; |
1702 | od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved; |
1703 | od->ddev.device_config = omap_dma_slave_config; |
1704 | od->ddev.device_pause = omap_dma_pause; |
1705 | od->ddev.device_resume = omap_dma_resume; |
1706 | od->ddev.device_terminate_all = omap_dma_terminate_all; |
1707 | od->ddev.device_synchronize = omap_dma_synchronize; |
1708 | od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS; |
1709 | od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; |
1710 | od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
1711 | if (__dma_omap15xx(od->plat->dma_attr)) |
1712 | od->ddev.residue_granularity = |
1713 | DMA_RESIDUE_GRANULARITY_DESCRIPTOR; |
1714 | else |
1715 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
1716 | od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ |
1717 | od->ddev.dev = &pdev->dev; |
	INIT_LIST_HEAD(&od->ddev.channels);
1719 | mutex_init(&od->lch_lock); |
1720 | spin_lock_init(&od->lock); |
1721 | spin_lock_init(&od->irq_lock); |
1722 | |
1723 | /* Number of DMA requests */ |
1724 | od->dma_requests = OMAP_SDMA_REQUESTS; |
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &od->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing dma-requests property, using %u.\n",
			 OMAP_SDMA_REQUESTS);
1731 | } |
1732 | |
1733 | /* Number of available logical channels */ |
1734 | if (!pdev->dev.of_node) { |
1735 | od->lch_count = od->plat->dma_attr->lch_count; |
1736 | if (unlikely(!od->lch_count)) |
1737 | od->lch_count = OMAP_SDMA_CHANNELS; |
	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
					&od->lch_count)) {
		dev_info(&pdev->dev,
			 "Missing dma-channels property, using %u.\n",
			 OMAP_SDMA_CHANNELS);
1743 | od->lch_count = OMAP_SDMA_CHANNELS; |
1744 | } |
1745 | |
1746 | /* Mask of allowed logical channels */ |
	if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
						       "dma-channel-mask",
						       &val)) {
1750 | /* Tag channels not in mask as reserved */ |
1751 | val = ~val; |
		bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
1753 | } |
1754 | if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED) |
		bitmap_set(od->lch_bitmap, 0, 2);
1756 | |
	od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
				   sizeof(*od->lch_map),
				   GFP_KERNEL);
1760 | if (!od->lch_map) |
1761 | return -ENOMEM; |
1762 | |
1763 | for (i = 0; i < od->dma_requests; i++) { |
1764 | rc = omap_dma_chan_init(od); |
1765 | if (rc) { |
1766 | omap_dma_free(od); |
1767 | return rc; |
1768 | } |
1769 | } |
1770 | |
1771 | irq = platform_get_irq(pdev, 1); |
1772 | if (irq <= 0) { |
1773 | dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n" , irq); |
1774 | od->legacy = true; |
1775 | } else { |
1776 | /* Disable all interrupts */ |
1777 | od->irq_enable_mask = 0; |
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);
1779 | |
		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
1782 | if (rc) { |
1783 | omap_dma_free(od); |
1784 | return rc; |
1785 | } |
1786 | } |
1787 | |
	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
1789 | od->ll123_supported = true; |
1790 | |
1791 | od->ddev.filter.map = od->plat->slave_map; |
1792 | od->ddev.filter.mapcnt = od->plat->slavecnt; |
1793 | od->ddev.filter.fn = omap_dma_filter_fn; |
1794 | |
1795 | if (od->ll123_supported) { |
		od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
						&pdev->dev,
						sizeof(struct omap_type2_desc),
						4, 0);
1800 | if (!od->desc_pool) { |
1801 | dev_err(&pdev->dev, |
1802 | "unable to allocate descriptor pool\n" ); |
1803 | od->ll123_supported = false; |
1804 | } |
1805 | } |
1806 | |
	rc = dma_async_device_register(&od->ddev);
1808 | if (rc) { |
1809 | pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n" , |
1810 | rc); |
1811 | omap_dma_free(od); |
1812 | return rc; |
1813 | } |
1814 | |
	platform_set_drvdata(pdev, od);
1816 | |
1817 | if (pdev->dev.of_node) { |
1818 | omap_dma_info.dma_cap = od->ddev.cap_mask; |
1819 | |
1820 | /* Device-tree DMA controller registration */ |
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
1826 | omap_dma_free(od); |
1827 | } |
1828 | } |
1829 | |
	omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);
1831 | |
1832 | if (od->cfg->needs_busy_check) { |
1833 | od->nb.notifier_call = omap_dma_busy_notifier; |
		cpu_pm_register_notifier(&od->nb);
1835 | } else if (od->cfg->may_lose_context) { |
1836 | od->nb.notifier_call = omap_dma_context_notifier; |
		cpu_pm_register_notifier(&od->nb);
1838 | } |
1839 | |
1840 | dev_info(&pdev->dev, "OMAP DMA engine driver%s\n" , |
1841 | od->ll123_supported ? " (LinkedList1/2/3 supported)" : "" ); |
1842 | |
1843 | return rc; |
1844 | } |
1845 | |
1846 | static void omap_dma_remove(struct platform_device *pdev) |
1847 | { |
1848 | struct omap_dmadev *od = platform_get_drvdata(pdev); |
1849 | int irq; |
1850 | |
1851 | if (od->cfg->may_lose_context) |
		cpu_pm_unregister_notifier(&od->nb);
1853 | |
1854 | if (pdev->dev.of_node) |
		of_dma_controller_free(pdev->dev.of_node);
1856 | |
1857 | irq = platform_get_irq(pdev, 1); |
	devm_free_irq(&pdev->dev, irq, od);
1859 | |
	dma_async_device_unregister(&od->ddev);
1861 | |
1862 | if (!omap_dma_legacy(od)) { |
1863 | /* Disable all interrupts */ |
		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
1865 | } |
1866 | |
1867 | if (od->ll123_supported) |
		dma_pool_destroy(od->desc_pool);
1869 | |
1870 | omap_dma_free(od); |
1871 | } |
1872 | |
1873 | static const struct omap_dma_config omap2420_data = { |
1874 | .lch_end = CCFN, |
1875 | .rw_priority = true, |
1876 | .needs_lch_clear = true, |
1877 | .needs_busy_check = true, |
1878 | }; |
1879 | |
1880 | static const struct omap_dma_config omap2430_data = { |
1881 | .lch_end = CCFN, |
1882 | .rw_priority = true, |
1883 | .needs_lch_clear = true, |
1884 | }; |
1885 | |
1886 | static const struct omap_dma_config omap3430_data = { |
1887 | .lch_end = CCFN, |
1888 | .rw_priority = true, |
1889 | .needs_lch_clear = true, |
1890 | .may_lose_context = true, |
1891 | }; |
1892 | |
1893 | static const struct omap_dma_config omap3630_data = { |
1894 | .lch_end = CCDN, |
1895 | .rw_priority = true, |
1896 | .needs_lch_clear = true, |
1897 | .may_lose_context = true, |
1898 | }; |
1899 | |
1900 | static const struct omap_dma_config omap4_data = { |
1901 | .lch_end = CCDN, |
1902 | .rw_priority = true, |
1903 | .needs_lch_clear = true, |
1904 | }; |
1905 | |
1906 | static const struct of_device_id omap_dma_match[] = { |
1907 | { .compatible = "ti,omap2420-sdma" , .data = &omap2420_data, }, |
1908 | { .compatible = "ti,omap2430-sdma" , .data = &omap2430_data, }, |
1909 | { .compatible = "ti,omap3430-sdma" , .data = &omap3430_data, }, |
1910 | { .compatible = "ti,omap3630-sdma" , .data = &omap3630_data, }, |
1911 | { .compatible = "ti,omap4430-sdma" , .data = &omap4_data, }, |
1912 | {}, |
1913 | }; |
1914 | MODULE_DEVICE_TABLE(of, omap_dma_match); |
1915 | |
1916 | static struct platform_driver omap_dma_driver = { |
1917 | .probe = omap_dma_probe, |
1918 | .remove_new = omap_dma_remove, |
1919 | .driver = { |
1920 | .name = "omap-dma-engine" , |
1921 | .of_match_table = omap_dma_match, |
1922 | }, |
1923 | }; |
1924 | |
1925 | static bool omap_dma_filter_fn(struct dma_chan *chan, void *param) |
1926 | { |
1927 | if (chan->device->dev->driver == &omap_dma_driver.driver) { |
		struct omap_dmadev *od = to_omap_dma_dev(chan->device);
		struct omap_chan *c = to_omap_dma_chan(chan);
1930 | unsigned req = *(unsigned *)param; |
1931 | |
1932 | if (req <= od->dma_requests) { |
1933 | c->dma_sig = req; |
1934 | return true; |
1935 | } |
1936 | } |
1937 | return false; |
1938 | } |
1939 | |
1940 | static int omap_dma_init(void) |
1941 | { |
1942 | return platform_driver_register(&omap_dma_driver); |
1943 | } |
1944 | subsys_initcall(omap_dma_init); |
1945 | |
1946 | static void __exit omap_dma_exit(void) |
1947 | { |
1948 | platform_driver_unregister(&omap_dma_driver); |
1949 | } |
1950 | module_exit(omap_dma_exit); |
1951 | |
1952 | MODULE_AUTHOR("Russell King" ); |
1953 | MODULE_LICENSE("GPL" ); |
1954 | |