// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

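/*
 * Per-channel transfer-complete handler, called from the controller's
 * interrupt handler. A non-cyclic descriptor is completed and the next
 * queued descriptor is started; a cyclic descriptor just gets its
 * period callback invoked and keeps running.
 */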
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	spin_lock(&fsl_chan->vchan.lock);

	if (!fsl_chan->edesc) {
		/* terminate_all called before */
		spin_unlock(&fsl_chan->vchan.lock);
		return;
	}

	if (!fsl_chan->edesc->iscyclic) {
		list_del(&fsl_chan->edesc->vdesc.node);
		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
		fsl_chan->edesc = NULL;
		fsl_chan->status = DMA_COMPLETE;
		fsl_chan->idle = true;
	} else {
		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
	}

	if (!fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock(&fsl_chan->vchan.lock);
}

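/*
 * eDMAv3 (split per-channel register space): set up the system bus
 * read/write attributes and the optional channel mux, then set
 * CHn_CSR[ERQ] so the channel responds to hardware requests.
 */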
static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val, flags;

	flags = fsl_edma_drvflags(fsl_chan);
	val = edma_readl_chreg(fsl_chan, ch_sbr);
	/* Remote/local swapped wrongly on iMX8 QM Audio edma */
	if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
		if (!fsl_chan->is_rxchan)
			val |= EDMA_V3_CH_SBR_RD;
		else
			val |= EDMA_V3_CH_SBR_WR;
	} else {
		if (fsl_chan->is_rxchan)
			val |= EDMA_V3_CH_SBR_RD;
		else
			val |= EDMA_V3_CH_SBR_WR;
	}

	if (fsl_chan->is_remote)
		val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);

	edma_writel_chreg(fsl_chan, val, ch_sbr);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
		/*
		 * ch_mux: With the exception of 0, attempts to write a value
		 * already in use will be forced to 0.
		 */
		if (!edma_readl_chreg(fsl_chan, ch_mux))
			edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
	}

	val = edma_readl_chreg(fsl_chan, ch_csr);
	val |= EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_enable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val = edma_readl_chreg(fsl_chan, ch_csr);
	u32 flags;

	flags = fsl_edma_drvflags(fsl_chan);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX)
		edma_writel_chreg(fsl_chan, 0, ch_mux);

	val &= ~EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_disable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}

static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}

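/*
 * Route (or un-route, when @enable is false) the DMAMUX request line
 * @slot to this channel. Channels are spread evenly across the
 * available muxes; on FSL_EDMA_DRV_MUX_SWAP parts, the per-channel
 * byte lanes within each 32-bit mux word are endian-swapped, which is
 * what the endian_diff[] offset table compensates for.
 */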
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	if (!dmamux_nr)
		return;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}

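/*
 * Translate a dma_slave_buswidth into the TCD ATTR transfer-size
 * encoding (log2 of the width in bytes), defaulting to 32-bit for
 * DMA_SLAVE_BUSWIDTH_UNDEFINED. The same size code is placed in both
 * the destination (DSIZE, low byte) and source (SSIZE, high byte)
 * fields of the returned value.
 */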
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	u32 val;

	if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	val = ffs(addr_width) - 1;
	return val | (val << 8);
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}

int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
		pm_runtime_allow(fsl_chan->pd_dev);

	return 0;
}

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}

static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}

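/*
 * Residue calculation: sum nbytes * biter over all TCDs of the
 * descriptor, then, for an in-flight descriptor, locate the TCD the
 * engine is currently working on by comparing the hardware source (or
 * destination) address against each TCD's address window, and subtract
 * what has already been transferred.
 */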
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	u32 nbytes = 0;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
		len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
	}

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_read_tcdreg(fsl_chan, saddr);
	else
		cur_addr = edma_read_tcdreg(fsl_chan, daddr);

	/* figure out the finished TCDs and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);

		size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);

		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}

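/*
 * Load one in-memory TCD into the channel's hardware TCD registers.
 * CSR is cleared first and written last, so the channel cannot start
 * from a half-programmed TCD.
 */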
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	u16 csr = 0;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, we need to load the TCD registers in
	 * big- or little-endian obeying the eDMA engine model endian,
	 * and this is performed from specific edma_write functions
	 */
	edma_write_tcdreg(fsl_chan, 0, csr);

	edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
	edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);

	edma_write_tcdreg(fsl_chan, tcd->attr, attr);
	edma_write_tcdreg(fsl_chan, tcd->soff, soff);

	edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
	edma_write_tcdreg(fsl_chan, tcd->slast, slast);

	edma_write_tcdreg(fsl_chan, tcd->citer, citer);
	edma_write_tcdreg(fsl_chan, tcd->biter, biter);
	edma_write_tcdreg(fsl_chan, tcd->doff, doff);

	edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);

	csr = le16_to_cpu(tcd->csr);

	if (fsl_chan->is_sw) {
		csr |= EDMA_TCD_CSR_START;
		tcd->csr = cpu_to_le16(csr);
	}
	/*
	 * On eDMAv3, CHn_CSR[DONE] must be cleared before enabling
	 * TCDn_CSR[ESG]; eDMAv4 has no such requirement. Changing MLINK
	 * requires clearing CHn_CSR[DONE] on both eDMAv3 and eDMAv4.
	 */
	if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
	     (csr & EDMA_TCD_CSR_E_SG)) ||
	    ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
	     (csr & EDMA_TCD_CSR_E_LINK)))
		edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);

	edma_write_tcdreg(fsl_chan, tcd->csr, csr);
}

static inline
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
		       struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	struct dma_slave_config *cfg = &fsl_chan->cfg;
	u16 csr = 0;
	u32 burst;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, waiting
	 * for fsl_edma_set_tcd_regs doing the swap.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(soff);

	if (fsl_chan->is_multi_fifo) {
		/* set mloff to support multiple fifo */
		burst = cfg->direction == DMA_DEV_TO_MEM ?
				cfg->src_addr_width : cfg->dst_addr_width;
		nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
		/* enable DMLOE/SMLOE */
		if (cfg->direction == DMA_MEM_TO_DEV) {
			nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
		} else {
			nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
		}
	}

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	if (fsl_chan->is_rxchan)
		csr |= EDMA_TCD_CSR_ACTIVE;

	if (fsl_chan->is_sw)
		csr |= EDMA_TCD_CSR_START;

	tcd->csr = cpu_to_le16(csr);
}

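/*
 * Allocate a descriptor with @sg_len TCDs drawn from the channel's
 * DMA pool; on a pool allocation failure, everything allocated so far
 * is released again.
 */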
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

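/*
 * Prepare a cyclic transfer: the buffer is split into buf_len /
 * period_len TCDs, each scatter-gather linked to the next and the last
 * one linked back to the first, so the transfer loops until
 * terminated. Each period raises a major-loop interrupt, except in the
 * DMA_DEV_TO_DEV case.
 */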
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	bool major_int = true;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = fsl_chan->is_multi_fifo ? 4 : 0;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = fsl_chan->is_multi_fifo ? 4 : 0;
			doff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = doff = 0;
			major_int = false;
		}

		fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, major_int, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

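/*
 * Prepare a slave scatter-gather transfer: one TCD per SG entry,
 * chained via the e_sg link. Only the last TCD raises the major-loop
 * interrupt and disables the hardware request. If an entry's length is
 * not a multiple of the configured burst, a smaller burst that divides
 * the length evenly is chosen instead.
 */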
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = 0;
			doff = 0;
		}

		/*
		 * Choose a suitable burst length if sg_dma_len is not a
		 * multiple of the burst length, so that the whole transfer
		 * length is a multiple of the minor loop (burst length).
		 */
		if (sg_dma_len(sg) % nbytes) {
			u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
			u32 burst = (direction == DMA_DEV_TO_MEM) ?
						fsl_chan->cfg.src_maxburst :
						fsl_chan->cfg.dst_maxburst;
			int j;

			for (j = burst; j > 1; j--) {
				if (!(sg_dma_len(sg) % (j * width))) {
					nbytes = j * width;
					break;
				}
			}
			/* Set burst size as 1 if there's no suitable one */
			if (j == 1)
				nbytes = width;
		}
		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

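/*
 * Prepare a memory-to-memory copy. A single TCD suffices because the
 * driver's copy_align and max_seg_size constrain @len to one transfer;
 * the channel is marked is_sw, so the transfer is started by software
 * (TCD CSR[START]) rather than by a hardware request.
 */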
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
		dma_addr_t dma_dst, dma_addr_t dma_src,
		size_t len, unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	fsl_chan->is_sw = true;

	/* To match with copy_align and max_seg_size so 1 tcd is enough */
	fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
			  fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
			  32, len, 0, 1, 1, 32, 0, true, true, false);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

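/*
 * Start the next queued descriptor on @fsl_chan: program its first TCD
 * into the hardware and enable the request line. Must be called with
 * the vchan lock held.
 */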
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}

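/*
 * Create the per-channel TCD pool; the 32-byte alignment presumably
 * satisfies the eDMA engine's TCD address alignment requirement for
 * hardware scatter-gather.
 */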
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_engine *edma = fsl_chan->edma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	if (edma->drvdata->dmamuxs)
		fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
	fsl_chan->is_sw = false;
}

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}

/*
 * On the 32-channel Vybrid/mpc577x eDMA version, register offsets
 * differ from the 64-channel ColdFire mcf5441x eDMA.
 *
 * This function sets up the register offsets for the declared version,
 * so it must be called in xxx_edma_probe() just after setting the
 * edma "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);

	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);

	if (is64) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}
}

MODULE_LICENSE("GPL v2");