1 | // SPDX-License-Identifier: GPL-2.0 |
2 | // (C) 2017-2018 Synopsys, Inc. (www.synopsys.com) |
3 | |
4 | /* |
5 | * Synopsys DesignWare AXI DMA Controller driver. |
6 | * |
7 | * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com> |
8 | */ |
9 | |
10 | #include <linux/bitops.h> |
11 | #include <linux/delay.h> |
12 | #include <linux/device.h> |
13 | #include <linux/dmaengine.h> |
14 | #include <linux/dmapool.h> |
15 | #include <linux/dma-mapping.h> |
16 | #include <linux/err.h> |
17 | #include <linux/interrupt.h> |
18 | #include <linux/io.h> |
19 | #include <linux/iopoll.h> |
20 | #include <linux/io-64-nonatomic-lo-hi.h> |
21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> |
23 | #include <linux/of.h> |
24 | #include <linux/of_dma.h> |
25 | #include <linux/platform_device.h> |
26 | #include <linux/pm_runtime.h> |
27 | #include <linux/property.h> |
28 | #include <linux/reset.h> |
29 | #include <linux/slab.h> |
30 | #include <linux/types.h> |
31 | |
32 | #include "dw-axi-dmac.h" |
33 | #include "../dmaengine.h" |
34 | #include "../virt-dma.h" |
35 | |
36 | /* |
37 | * The set of bus widths supported by the DMA controller. DW AXI DMAC supports |
38 | * master data bus width up to 512 bits (for both AXI master interfaces), but |
39 | * it depends on IP block configuration. |
40 | */ |
41 | #define AXI_DMA_BUSWIDTHS \ |
42 | (DMA_SLAVE_BUSWIDTH_1_BYTE | \ |
43 | DMA_SLAVE_BUSWIDTH_2_BYTES | \ |
44 | DMA_SLAVE_BUSWIDTH_4_BYTES | \ |
45 | DMA_SLAVE_BUSWIDTH_8_BYTES | \ |
46 | DMA_SLAVE_BUSWIDTH_16_BYTES | \ |
47 | DMA_SLAVE_BUSWIDTH_32_BYTES | \ |
48 | DMA_SLAVE_BUSWIDTH_64_BYTES) |
49 | |
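/* Per-compatible feature flags, passed through struct of_device_id::data */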
50 | #define AXI_DMA_FLAG_HAS_APB_REGS BIT(0) |
51 | #define AXI_DMA_FLAG_HAS_RESETS BIT(1) |
52 | #define AXI_DMA_FLAG_USE_CFG2 BIT(2) |
53 | |
54 | static inline void |
55 | axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val) |
56 | { |
57 | iowrite32(val, chip->regs + reg); |
58 | } |
59 | |
60 | static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg) |
61 | { |
62 | return ioread32(chip->regs + reg); |
63 | } |
64 | |
65 | static inline void |
66 | axi_dma_iowrite64(struct axi_dma_chip *chip, u32 reg, u64 val) |
67 | { |
	iowrite64(val, chip->regs + reg);
69 | } |
70 | |
71 | static inline u64 axi_dma_ioread64(struct axi_dma_chip *chip, u32 reg) |
72 | { |
	return ioread64(chip->regs + reg);
74 | } |
75 | |
76 | static inline void |
77 | axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val) |
78 | { |
79 | iowrite32(val, chan->chan_regs + reg); |
80 | } |
81 | |
82 | static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg) |
83 | { |
84 | return ioread32(chan->chan_regs + reg); |
85 | } |
86 | |
87 | static inline void |
88 | axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val) |
89 | { |
90 | /* |
91 | * We split one 64 bit write for two 32 bit write as some HW doesn't |
92 | * support 64 bit access. |
93 | */ |
94 | iowrite32(lower_32_bits(val), chan->chan_regs + reg); |
95 | iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4); |
96 | } |
97 | |
98 | static inline void axi_chan_config_write(struct axi_dma_chan *chan, |
99 | struct axi_dma_chan_config *config) |
100 | { |
101 | u32 cfg_lo, cfg_hi; |
102 | |
103 | cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS | |
104 | config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS); |
105 | if (chan->chip->dw->hdata->reg_map_8_channels && |
106 | !chan->chip->dw->hdata->use_cfg2) { |
107 | cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS | |
108 | config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS | |
109 | config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS | |
110 | config->src_per << CH_CFG_H_SRC_PER_POS | |
111 | config->dst_per << CH_CFG_H_DST_PER_POS | |
112 | config->prior << CH_CFG_H_PRIORITY_POS; |
113 | } else { |
114 | cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS | |
115 | config->dst_per << CH_CFG2_L_DST_PER_POS; |
116 | cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS | |
117 | config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS | |
118 | config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS | |
119 | config->prior << CH_CFG2_H_PRIORITY_POS; |
120 | } |
	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
123 | } |
124 | |
125 | static inline void axi_dma_disable(struct axi_dma_chip *chip) |
126 | { |
127 | u32 val; |
128 | |
129 | val = axi_dma_ioread32(chip, DMAC_CFG); |
130 | val &= ~DMAC_EN_MASK; |
131 | axi_dma_iowrite32(chip, DMAC_CFG, val); |
132 | } |
133 | |
134 | static inline void axi_dma_enable(struct axi_dma_chip *chip) |
135 | { |
136 | u32 val; |
137 | |
138 | val = axi_dma_ioread32(chip, DMAC_CFG); |
139 | val |= DMAC_EN_MASK; |
140 | axi_dma_iowrite32(chip, DMAC_CFG, val); |
141 | } |
142 | |
143 | static inline void axi_dma_irq_disable(struct axi_dma_chip *chip) |
144 | { |
145 | u32 val; |
146 | |
147 | val = axi_dma_ioread32(chip, DMAC_CFG); |
148 | val &= ~INT_EN_MASK; |
149 | axi_dma_iowrite32(chip, DMAC_CFG, val); |
150 | } |
151 | |
152 | static inline void axi_dma_irq_enable(struct axi_dma_chip *chip) |
153 | { |
154 | u32 val; |
155 | |
156 | val = axi_dma_ioread32(chip, DMAC_CFG); |
157 | val |= INT_EN_MASK; |
158 | axi_dma_iowrite32(chip, DMAC_CFG, val); |
159 | } |
160 | |
161 | static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask) |
162 | { |
163 | u32 val; |
164 | |
165 | if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) { |
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
167 | } else { |
168 | val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA); |
169 | val &= ~irq_mask; |
170 | axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val); |
171 | } |
172 | } |
173 | |
174 | static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask) |
175 | { |
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
177 | } |
178 | |
179 | static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask) |
180 | { |
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
182 | } |
183 | |
184 | static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask) |
185 | { |
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
187 | } |
188 | |
189 | static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan) |
190 | { |
191 | return axi_chan_ioread32(chan, CH_INTSTATUS); |
192 | } |
193 | |
194 | static inline void axi_chan_disable(struct axi_dma_chan *chan) |
195 | { |
196 | u64 val; |
197 | |
198 | if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) { |
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
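		/*
		 * Channels 16..31 have their enable and write-enable bits in
		 * the upper 32 bits of DMAC_CHEN, hence the additional
		 * DMAC_CHAN_BLOCK_SHIFT below.
		 */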
200 | if (chan->id >= DMAC_CHAN_16) { |
201 | val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16) |
202 | << (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT)); |
203 | val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16) |
204 | << (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT); |
205 | } else { |
206 | val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT); |
207 | val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT; |
208 | } |
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
		if (chan->chip->dw->hdata->reg_map_8_channels)
			val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
		else
			val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
218 | } |
219 | } |
220 | |
221 | static inline void axi_chan_enable(struct axi_dma_chan *chan) |
222 | { |
223 | u64 val; |
224 | |
225 | if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) { |
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
227 | if (chan->id >= DMAC_CHAN_16) { |
228 | val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16) |
229 | << (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT) | |
230 | (u64)(BIT(chan->id) >> DMAC_CHAN_16) |
231 | << (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT); |
232 | } else { |
233 | val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | |
234 | BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT; |
235 | } |
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
239 | if (chan->chip->dw->hdata->reg_map_8_channels) { |
240 | val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | |
241 | BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; |
242 | } else { |
243 | val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | |
244 | BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT; |
245 | } |
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
247 | } |
248 | } |
249 | |
250 | static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan) |
251 | { |
252 | u64 val; |
253 | |
254 | if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) |
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
	else
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
258 | |
259 | if (chan->id >= DMAC_CHAN_16) |
260 | return !!(val & ((u64)(BIT(chan->id) >> DMAC_CHAN_16) << DMAC_CHAN_BLOCK_SHIFT)); |
261 | else |
262 | return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT)); |
263 | } |
264 | |
265 | static void axi_dma_hw_init(struct axi_dma_chip *chip) |
266 | { |
267 | int ret; |
268 | u32 i; |
269 | |
270 | for (i = 0; i < chip->dw->hdata->nr_channels; i++) { |
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(chip->dev, "Unable to set coherent mask\n");
277 | } |
278 | |
279 | static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src, |
280 | dma_addr_t dst, size_t len) |
281 | { |
282 | u32 max_width = chan->chip->dw->hdata->m_data_width; |
283 | |
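	/*
	 * Pick the widest transfer width (as a power-of-two exponent) that
	 * keeps source address, destination address and length all aligned,
	 * capped at the master data bus width.
	 */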
284 | return __ffs(src | dst | len | BIT(max_width)); |
285 | } |
286 | |
287 | static inline const char *axi_chan_name(struct axi_dma_chan *chan) |
288 | { |
	return dma_chan_name(&chan->vc.chan);
290 | } |
291 | |
292 | static struct axi_dma_desc *axi_desc_alloc(u32 num) |
293 | { |
294 | struct axi_dma_desc *desc; |
295 | |
	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
303 | return NULL; |
304 | } |
305 | |
306 | return desc; |
307 | } |
308 | |
309 | static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan, |
310 | dma_addr_t *addr) |
311 | { |
312 | struct axi_dma_lli *lli; |
313 | dma_addr_t phys; |
314 | |
	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
323 | *addr = phys; |
324 | |
325 | return lli; |
326 | } |
327 | |
328 | static void axi_desc_put(struct axi_dma_desc *desc) |
329 | { |
330 | struct axi_dma_chan *chan = desc->chan; |
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	int descs_put;

	for (descs_put = 0; descs_put < count; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
344 | axi_chan_name(chan), descs_put, |
345 | atomic_read(&chan->descs_allocated)); |
346 | } |
347 | |
348 | static void vchan_desc_put(struct virt_dma_desc *vdesc) |
349 | { |
	axi_desc_put(vd_to_axi_desc(vdesc));
351 | } |
352 | |
353 | static enum dma_status |
354 | dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, |
355 | struct dma_tx_state *txstate) |
356 | { |
357 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
358 | struct virt_dma_desc *vdesc; |
359 | enum dma_status status; |
360 | u32 completed_length; |
361 | unsigned long flags; |
362 | u32 completed_blocks; |
363 | size_t bytes = 0; |
364 | u32 length; |
365 | u32 len; |
366 | |
	status = dma_cookie_status(dchan, cookie, txstate);
368 | if (status == DMA_COMPLETE || !txstate) |
369 | return status; |
370 | |
371 | spin_lock_irqsave(&chan->vc.lock, flags); |
372 | |
373 | vdesc = vchan_find_desc(&chan->vc, cookie); |
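	/*
	 * Estimate the residue from the completed block count. This assumes
	 * every segment has the size of hw_desc[0], which holds for cyclic
	 * transfers and is an approximation otherwise.
	 */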
374 | if (vdesc) { |
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
378 | completed_length = completed_blocks * len; |
379 | bytes = length - completed_length; |
380 | } |
381 | |
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);
384 | |
385 | return status; |
386 | } |
387 | |
388 | static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr) |
389 | { |
390 | desc->lli->llp = cpu_to_le64(adr); |
391 | } |
392 | |
393 | static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr) |
394 | { |
	axi_chan_iowrite64(chan, CH_LLP, adr);
396 | } |
397 | |
398 | static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set) |
399 | { |
400 | u32 offset = DMAC_APB_BYTE_WR_CH_EN; |
401 | u32 reg_width, val; |
402 | |
403 | if (!chan->chip->apb_regs) { |
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
405 | return; |
406 | } |
407 | |
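	/*
	 * Pick the APB enable register that matches the destination register
	 * width: byte-wide by default, halfword-wide for 16-bit peripherals.
	 */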
408 | reg_width = __ffs(chan->config.dst_addr_width); |
409 | if (reg_width == DWAXIDMAC_TRANS_WIDTH_16) |
410 | offset = DMAC_APB_HALFWORD_WR_CH_EN; |
411 | |
412 | val = ioread32(chan->chip->apb_regs + offset); |
413 | |
414 | if (set) |
415 | val |= BIT(chan->id); |
416 | else |
417 | val &= ~BIT(chan->id); |
418 | |
419 | iowrite32(val, chan->chip->apb_regs + offset); |
420 | } |

/* Called in chan locked context */
422 | static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, |
423 | struct axi_dma_desc *first) |
424 | { |
425 | u32 priority = chan->chip->dw->hdata->priority[chan->id]; |
426 | struct axi_dma_chan_config config = {}; |
427 | u32 irq_mask; |
428 | u8 lms = 0; /* Select AXI0 master for LLI fetching */ |
429 | |
430 | if (unlikely(axi_chan_is_hw_enable(chan))) { |
		dev_err(chan2dev(chan), "%s is non-idle!\n",
432 | axi_chan_name(chan)); |
433 | |
434 | return; |
435 | } |
436 | |
	axi_dma_enable(chan->chip);
438 | |
439 | config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL; |
440 | config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL; |
441 | config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC; |
442 | config.prior = priority; |
443 | config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW; |
444 | config.hs_sel_src = DWAXIDMAC_HS_SEL_HW; |
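	/*
	 * For slave transfers, override the default flow control and program
	 * the handshake interface number (dst_per/src_per).
	 */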
445 | switch (chan->direction) { |
446 | case DMA_MEM_TO_DEV: |
		dw_axi_dma_set_byte_halfword(chan, true);
448 | config.tt_fc = chan->config.device_fc ? |
449 | DWAXIDMAC_TT_FC_MEM_TO_PER_DST : |
450 | DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC; |
451 | if (chan->chip->apb_regs) |
452 | config.dst_per = chan->id; |
453 | else |
454 | config.dst_per = chan->hw_handshake_num; |
455 | break; |
456 | case DMA_DEV_TO_MEM: |
457 | config.tt_fc = chan->config.device_fc ? |
458 | DWAXIDMAC_TT_FC_PER_TO_MEM_SRC : |
459 | DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC; |
460 | if (chan->chip->apb_regs) |
461 | config.src_per = chan->id; |
462 | else |
463 | config.src_per = chan->hw_handshake_num; |
464 | break; |
465 | default: |
466 | break; |
467 | } |
	axi_chan_config_write(chan, &config);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);
471 | |
472 | irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR; |
473 | axi_chan_irq_sig_set(chan, irq_mask); |
474 | |
475 | /* Generate 'suspend' status but don't generate interrupt */ |
476 | irq_mask |= DWAXIDMAC_IRQ_SUSPENDED; |
477 | axi_chan_irq_set(chan, irq_mask); |
478 | |
479 | axi_chan_enable(chan); |
480 | } |
481 | |
482 | static void axi_chan_start_first_queued(struct axi_dma_chan *chan) |
483 | { |
484 | struct axi_dma_desc *desc; |
485 | struct virt_dma_desc *vd; |
486 | |
	vd = vchan_next_desc(&chan->vc);
488 | if (!vd) |
489 | return; |
490 | |
491 | desc = vd_to_axi_desc(vd); |
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
495 | } |
496 | |
497 | static void dma_chan_issue_pending(struct dma_chan *dchan) |
498 | { |
499 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
500 | unsigned long flags; |
501 | |
502 | spin_lock_irqsave(&chan->vc.lock, flags); |
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
506 | } |
507 | |
508 | static void dw_axi_dma_synchronize(struct dma_chan *dchan) |
509 | { |
510 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
511 | |
	vchan_synchronize(&chan->vc);
513 | } |
514 | |
515 | static int dma_chan_alloc_chan_resources(struct dma_chan *dchan) |
516 | { |
517 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
518 | |
519 | /* ASSERT: channel is idle */ |
520 | if (axi_chan_is_hw_enable(chan)) { |
		dev_err(chan2dev(chan), "%s is non-idle!\n",
522 | axi_chan_name(chan)); |
523 | return -EBUSY; |
524 | } |
525 | |
526 | /* LLI address must be aligned to a 64-byte boundary */ |
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
533 | return -ENOMEM; |
534 | } |
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);
538 | |
539 | return 0; |
540 | } |
541 | |
542 | static void dma_chan_free_chan_resources(struct dma_chan *dchan) |
543 | { |
544 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
545 | |
546 | /* ASSERT: channel is idle */ |
547 | if (axi_chan_is_hw_enable(chan)) |
		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
549 | axi_chan_name(chan)); |
550 | |
551 | axi_chan_disable(chan); |
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dma_pool_destroy(chan->desc_pool);
557 | chan->desc_pool = NULL; |
	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptor still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

	pm_runtime_put(chan->chip->dev);
563 | } |
564 | |
565 | static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set) |
566 | { |
567 | struct axi_dma_chip *chip = chan->chip; |
568 | unsigned long reg_value, val; |
569 | |
570 | if (!chip->apb_regs) { |
		dev_err(chip->dev, "apb_regs not initialized\n");
572 | return; |
573 | } |
574 | |
575 | /* |
576 | * An unused DMA channel has a default value of 0x3F. |
577 | * Lock the DMA channel by assign a handshake number to the channel. |
578 | * Unlock the DMA channel by assign 0x3F to the channel. |
579 | */ |
580 | if (set) |
581 | val = chan->hw_handshake_num; |
582 | else |
583 | val = UNUSED_CHANNEL; |
584 | |
	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	/*
	 * The channel is already allocated; program the handshake select
	 * field for this channel ID. A single 64-bit write covers the
	 * select fields of all 8 channels.
	 */
	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
596 | } |
597 | |
598 | /* |
599 | * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI |
600 | * as 1, it understands that the current block is the final block in the |
601 | * transfer and completes the DMA transfer operation at the end of current |
602 | * block transfer. |
603 | */ |
604 | static void set_desc_last(struct axi_dma_hw_desc *desc) |
605 | { |
606 | u32 val; |
607 | |
608 | val = le32_to_cpu(desc->lli->ctl_hi); |
609 | val |= CH_CTL_H_LLI_LAST; |
610 | desc->lli->ctl_hi = cpu_to_le32(val); |
611 | } |
612 | |
613 | static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr) |
614 | { |
615 | desc->lli->sar = cpu_to_le64(adr); |
616 | } |
617 | |
618 | static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr) |
619 | { |
620 | desc->lli->dar = cpu_to_le64(adr); |
621 | } |
622 | |
623 | static void set_desc_src_master(struct axi_dma_hw_desc *desc) |
624 | { |
625 | u32 val; |
626 | |
627 | /* Select AXI0 for source master */ |
628 | val = le32_to_cpu(desc->lli->ctl_lo); |
629 | val &= ~CH_CTL_L_SRC_MAST; |
630 | desc->lli->ctl_lo = cpu_to_le32(val); |
631 | } |
632 | |
633 | static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc, |
634 | struct axi_dma_desc *desc) |
635 | { |
636 | u32 val; |
637 | |
	/* Select AXI1 for destination master if available */
639 | val = le32_to_cpu(hw_desc->lli->ctl_lo); |
640 | if (desc->chan->chip->dw->hdata->nr_masters > 1) |
641 | val |= CH_CTL_L_DST_MAST; |
642 | else |
643 | val &= ~CH_CTL_L_DST_MAST; |
644 | |
645 | hw_desc->lli->ctl_lo = cpu_to_le32(val); |
646 | } |
647 | |
648 | static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan, |
649 | struct axi_dma_hw_desc *hw_desc, |
650 | dma_addr_t mem_addr, size_t len) |
651 | { |
652 | unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width); |
653 | unsigned int reg_width; |
654 | unsigned int mem_width; |
655 | dma_addr_t device_addr; |
656 | size_t axi_block_ts; |
657 | size_t block_ts; |
658 | u32 ctllo, ctlhi; |
659 | u32 burst_len; |
660 | |
661 | axi_block_ts = chan->chip->dw->hdata->block_size[chan->id]; |
662 | |
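	/*
	 * Use the widest memory-side transfer width that keeps the address
	 * and length aligned; the driver caps it at 32 bits.
	 */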
663 | mem_width = __ffs(data_width | mem_addr | len); |
664 | if (mem_width > DWAXIDMAC_TRANS_WIDTH_32) |
665 | mem_width = DWAXIDMAC_TRANS_WIDTH_32; |
666 | |
667 | if (!IS_ALIGNED(mem_addr, 4)) { |
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
669 | return -EINVAL; |
670 | } |
671 | |
672 | switch (chan->direction) { |
673 | case DMA_MEM_TO_DEV: |
674 | reg_width = __ffs(chan->config.dst_addr_width); |
675 | device_addr = chan->config.dst_addr; |
676 | ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS | |
677 | mem_width << CH_CTL_L_SRC_WIDTH_POS | |
678 | DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS | |
679 | DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS; |
680 | block_ts = len >> mem_width; |
681 | break; |
682 | case DMA_DEV_TO_MEM: |
683 | reg_width = __ffs(chan->config.src_addr_width); |
684 | device_addr = chan->config.src_addr; |
685 | ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS | |
686 | mem_width << CH_CTL_L_DST_WIDTH_POS | |
687 | DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS | |
688 | DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS; |
689 | block_ts = len >> reg_width; |
690 | break; |
691 | default: |
692 | return -EINVAL; |
693 | } |
694 | |
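	/* block_ts counts source-width data items; it must fit the HW block size */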
695 | if (block_ts > axi_block_ts) |
696 | return -EINVAL; |
697 | |
	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
699 | if (unlikely(!hw_desc->lli)) |
700 | return -ENOMEM; |
701 | |
702 | ctlhi = CH_CTL_H_LLI_VALID; |
703 | |
704 | if (chan->chip->dw->hdata->restrict_axi_burst_len) { |
705 | burst_len = chan->chip->dw->hdata->axi_rw_burst_len; |
706 | ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN | |
707 | burst_len << CH_CTL_H_ARLEN_POS | |
708 | burst_len << CH_CTL_H_AWLEN_POS; |
709 | } |
710 | |
711 | hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi); |
712 | |
713 | if (chan->direction == DMA_MEM_TO_DEV) { |
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
719 | } |
720 | |
721 | hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1); |
722 | |
723 | ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS | |
724 | DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS; |
725 | hw_desc->lli->ctl_lo = cpu_to_le32(ctllo); |
726 | |
727 | set_desc_src_master(hw_desc); |
728 | |
729 | hw_desc->len = len; |
730 | return 0; |
731 | } |
732 | |
733 | static size_t calculate_block_len(struct axi_dma_chan *chan, |
734 | dma_addr_t dma_addr, size_t buf_len, |
735 | enum dma_transfer_direction direction) |
736 | { |
737 | u32 data_width, reg_width, mem_width; |
738 | size_t axi_block_ts, block_len; |
739 | |
740 | axi_block_ts = chan->chip->dw->hdata->block_size[chan->id]; |
741 | |
742 | switch (direction) { |
743 | case DMA_MEM_TO_DEV: |
744 | data_width = BIT(chan->chip->dw->hdata->m_data_width); |
745 | mem_width = __ffs(data_width | dma_addr | buf_len); |
746 | if (mem_width > DWAXIDMAC_TRANS_WIDTH_32) |
747 | mem_width = DWAXIDMAC_TRANS_WIDTH_32; |
748 | |
749 | block_len = axi_block_ts << mem_width; |
750 | break; |
751 | case DMA_DEV_TO_MEM: |
752 | reg_width = __ffs(chan->config.src_addr_width); |
753 | block_len = axi_block_ts << reg_width; |
754 | break; |
755 | default: |
756 | block_len = 0; |
757 | } |
758 | |
759 | return block_len; |
760 | } |
761 | |
762 | static struct dma_async_tx_descriptor * |
763 | dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr, |
764 | size_t buf_len, size_t period_len, |
765 | enum dma_transfer_direction direction, |
766 | unsigned long flags) |
767 | { |
768 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
769 | struct axi_dma_hw_desc *hw_desc = NULL; |
770 | struct axi_dma_desc *desc = NULL; |
771 | dma_addr_t src_addr = dma_addr; |
772 | u32 num_periods, num_segments; |
773 | size_t axi_block_len; |
774 | u32 total_segments; |
775 | u32 segment_len; |
776 | unsigned int i; |
777 | int status; |
778 | u64 llp = 0; |
779 | u8 lms = 0; /* Select AXI0 master for LLI fetching */ |
780 | |
781 | num_periods = buf_len / period_len; |
782 | |
783 | axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction); |
784 | if (axi_block_len == 0) |
785 | return NULL; |
786 | |
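	/*
	 * Split each period into equal-size segments so that every segment
	 * fits within the hardware block size; each segment gets its own LLI.
	 */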
787 | num_segments = DIV_ROUND_UP(period_len, axi_block_len); |
788 | segment_len = DIV_ROUND_UP(period_len, num_segments); |
789 | |
790 | total_segments = num_periods * num_segments; |
791 | |
	desc = axi_desc_alloc(total_segments);
793 | if (unlikely(!desc)) |
794 | goto err_desc_get; |
795 | |
796 | chan->direction = direction; |
797 | desc->chan = chan; |
798 | chan->cyclic = true; |
799 | desc->length = 0; |
800 | desc->period_len = period_len; |
801 | |
802 | for (i = 0; i < total_segments; i++) { |
803 | hw_desc = &desc->hw_desc[i]; |
804 | |
		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
807 | if (status < 0) |
808 | goto err_desc_get; |
809 | |
810 | desc->length += hw_desc->len; |
		/*
		 * Set end-of-link on every linked descriptor so that the
		 * cyclic callback can be triggered from the interrupt handler.
		 */
814 | set_desc_last(hw_desc); |
815 | |
816 | src_addr += segment_len; |
817 | } |
818 | |
819 | llp = desc->hw_desc[0].llp; |
820 | |
	/* Link the LLIs in reverse order; the last LLI wraps back to the first (cyclic) */
822 | do { |
823 | hw_desc = &desc->hw_desc[--total_segments]; |
		write_desc_llp(hw_desc, llp | lms);
825 | llp = hw_desc->llp; |
826 | } while (total_segments); |
827 | |
	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
831 | |
832 | err_desc_get: |
833 | if (desc) |
834 | axi_desc_put(desc); |
835 | |
836 | return NULL; |
837 | } |
838 | |
839 | static struct dma_async_tx_descriptor * |
840 | dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, |
841 | unsigned int sg_len, |
842 | enum dma_transfer_direction direction, |
843 | unsigned long flags, void *context) |
844 | { |
845 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
846 | struct axi_dma_hw_desc *hw_desc = NULL; |
847 | struct axi_dma_desc *desc = NULL; |
848 | u32 num_segments, segment_len; |
849 | unsigned int loop = 0; |
850 | struct scatterlist *sg; |
851 | size_t axi_block_len; |
852 | u32 len, num_sgs = 0; |
853 | unsigned int i; |
854 | dma_addr_t mem; |
855 | int status; |
856 | u64 llp = 0; |
857 | u8 lms = 0; /* Select AXI0 master for LLI fetching */ |
858 | |
859 | if (unlikely(!is_slave_direction(direction) || !sg_len)) |
860 | return NULL; |
861 | |
862 | mem = sg_dma_address(sgl); |
863 | len = sg_dma_len(sgl); |
864 | |
	axi_block_len = calculate_block_len(chan, mem, len, direction);
866 | if (axi_block_len == 0) |
867 | return NULL; |
868 | |
869 | for_each_sg(sgl, sg, sg_len, i) |
870 | num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len); |
871 | |
	desc = axi_desc_alloc(num_sgs);
873 | if (unlikely(!desc)) |
874 | goto err_desc_get; |
875 | |
876 | desc->chan = chan; |
877 | desc->length = 0; |
878 | chan->direction = direction; |
879 | |
880 | for_each_sg(sgl, sg, sg_len, i) { |
881 | mem = sg_dma_address(sg); |
882 | len = sg_dma_len(sg); |
883 | num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len); |
884 | segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments); |
885 | |
886 | do { |
887 | hw_desc = &desc->hw_desc[loop++]; |
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
889 | if (status < 0) |
890 | goto err_desc_get; |
891 | |
892 | desc->length += hw_desc->len; |
893 | len -= segment_len; |
894 | mem += segment_len; |
895 | } while (len >= segment_len); |
896 | } |
897 | |
898 | /* Set end-of-link to the last link descriptor of list */ |
899 | set_desc_last(&desc->hw_desc[num_sgs - 1]); |
900 | |
	/* Link the LLIs in reverse order; the last LLI keeps llp = 0 (end of list) */
902 | do { |
903 | hw_desc = &desc->hw_desc[--num_sgs]; |
		write_desc_llp(hw_desc, llp | lms);
905 | llp = hw_desc->llp; |
906 | } while (num_sgs); |
907 | |
	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
911 | |
912 | err_desc_get: |
913 | if (desc) |
914 | axi_desc_put(desc); |
915 | |
916 | return NULL; |
917 | } |
918 | |
919 | static struct dma_async_tx_descriptor * |
920 | dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, |
921 | dma_addr_t src_adr, size_t len, unsigned long flags) |
922 | { |
923 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
924 | size_t block_ts, max_block_ts, xfer_len; |
925 | struct axi_dma_hw_desc *hw_desc = NULL; |
926 | struct axi_dma_desc *desc = NULL; |
927 | u32 xfer_width, reg, num; |
928 | u64 llp = 0; |
929 | u8 lms = 0; /* Select AXI0 master for LLI fetching */ |
930 | |
	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
932 | axi_chan_name(chan), &src_adr, &dst_adr, len, flags); |
933 | |
934 | max_block_ts = chan->chip->dw->hdata->block_size[chan->id]; |
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
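	/* Number of LLIs needed, estimated from the initial transfer width */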
936 | num = DIV_ROUND_UP(len, max_block_ts << xfer_width); |
937 | desc = axi_desc_alloc(num); |
938 | if (unlikely(!desc)) |
939 | goto err_desc_get; |
940 | |
941 | desc->chan = chan; |
942 | num = 0; |
943 | desc->length = 0; |
944 | while (len) { |
945 | xfer_len = len; |
946 | |
947 | hw_desc = &desc->hw_desc[num]; |
948 | /* |
949 | * Take care for the alignment. |
950 | * Actually source and destination widths can be different, but |
951 | * make them same to be simpler. |
952 | */ |
953 | xfer_width = axi_chan_get_xfer_width(chan, src: src_adr, dst: dst_adr, len: xfer_len); |
954 | |
955 | /* |
956 | * block_ts indicates the total number of data of width |
957 | * to be transferred in a DMA block transfer. |
958 | * BLOCK_TS register should be set to block_ts - 1 |
959 | */ |
960 | block_ts = xfer_len >> xfer_width; |
961 | if (block_ts > max_block_ts) { |
962 | block_ts = max_block_ts; |
963 | xfer_len = max_block_ts << xfer_width; |
964 | } |
965 | |
		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
967 | if (unlikely(!hw_desc->lli)) |
968 | goto err_desc_get; |
969 | |
		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
972 | hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1); |
973 | |
974 | reg = CH_CTL_H_LLI_VALID; |
975 | if (chan->chip->dw->hdata->restrict_axi_burst_len) { |
976 | u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len; |
977 | |
978 | reg |= (CH_CTL_H_ARLEN_EN | |
979 | burst_len << CH_CTL_H_ARLEN_POS | |
980 | CH_CTL_H_AWLEN_EN | |
981 | burst_len << CH_CTL_H_AWLEN_POS); |
982 | } |
983 | hw_desc->lli->ctl_hi = cpu_to_le32(reg); |
984 | |
985 | reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS | |
986 | DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS | |
987 | xfer_width << CH_CTL_L_DST_WIDTH_POS | |
988 | xfer_width << CH_CTL_L_SRC_WIDTH_POS | |
989 | DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS | |
990 | DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS); |
991 | hw_desc->lli->ctl_lo = cpu_to_le32(reg); |
992 | |
993 | set_desc_src_master(hw_desc); |
994 | set_desc_dest_master(hw_desc, desc); |
995 | |
996 | hw_desc->len = xfer_len; |
997 | desc->length += hw_desc->len; |
998 | /* update the length and addresses for the next loop cycle */ |
999 | len -= xfer_len; |
1000 | dst_adr += xfer_len; |
1001 | src_adr += xfer_len; |
1002 | num++; |
1003 | } |
1004 | |
1005 | /* Set end-of-link to the last link descriptor of list */ |
1006 | set_desc_last(&desc->hw_desc[num - 1]); |
	/* Link the LLIs in reverse order; the last LLI keeps llp = 0 (end of list) */
1008 | do { |
1009 | hw_desc = &desc->hw_desc[--num]; |
		write_desc_llp(hw_desc, llp | lms);
1011 | llp = hw_desc->llp; |
1012 | } while (num); |
1013 | |
	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
1015 | |
1016 | err_desc_get: |
1017 | if (desc) |
1018 | axi_desc_put(desc); |
1019 | return NULL; |
1020 | } |
1021 | |
1022 | static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan, |
1023 | struct dma_slave_config *config) |
1024 | { |
1025 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
1026 | |
1027 | memcpy(&chan->config, config, sizeof(*config)); |
1028 | |
1029 | return 0; |
1030 | } |
1031 | |
1032 | static void axi_chan_dump_lli(struct axi_dma_chan *chan, |
1033 | struct axi_dma_hw_desc *desc) |
1034 | { |
1035 | if (!desc->lli) { |
		dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
1037 | return; |
1038 | } |
1039 | |
1040 | dev_err(dchan2dev(&chan->vc.chan), |
1041 | "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x" , |
1042 | le64_to_cpu(desc->lli->sar), |
1043 | le64_to_cpu(desc->lli->dar), |
1044 | le64_to_cpu(desc->lli->llp), |
1045 | le32_to_cpu(desc->lli->block_ts_lo), |
1046 | le32_to_cpu(desc->lli->ctl_hi), |
1047 | le32_to_cpu(desc->lli->ctl_lo)); |
1048 | } |
1049 | |
1050 | static void axi_chan_list_dump_lli(struct axi_dma_chan *chan, |
1051 | struct axi_dma_desc *desc_head) |
1052 | { |
	int count = atomic_read(&chan->descs_allocated);
1054 | int i; |
1055 | |
1056 | for (i = 0; i < count; i++) |
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
1058 | } |
1059 | |
1060 | static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) |
1061 | { |
1062 | struct virt_dma_desc *vd; |
1063 | unsigned long flags; |
1064 | |
1065 | spin_lock_irqsave(&chan->vc.lock, flags); |
1066 | |
1067 | axi_chan_disable(chan); |
1068 | |
	/* The bad descriptor is currently at the head of the vc list */
	vd = vchan_next_desc(&chan->vc);
1071 | if (!vd) { |
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
1073 | axi_chan_name(chan)); |
1074 | goto out; |
1075 | } |
	/* Remove the bad descriptor from the issued list */
	list_del(&vd->node);
1078 | |
1079 | /* WARN about bad descriptor */ |
1080 | dev_err(chan2dev(chan), |
1081 | "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n" , |
1082 | axi_chan_name(chan), vd->tx.cookie, status); |
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
1084 | |
1085 | vchan_cookie_complete(vd); |
1086 | |
1087 | /* Try to restart the controller */ |
1088 | axi_chan_start_first_queued(chan); |
1089 | |
1090 | out: |
	spin_unlock_irqrestore(&chan->vc.lock, flags);
1092 | } |
1093 | |
1094 | static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) |
1095 | { |
	int count = atomic_read(&chan->descs_allocated);
1097 | struct axi_dma_hw_desc *hw_desc; |
1098 | struct axi_dma_desc *desc; |
1099 | struct virt_dma_desc *vd; |
1100 | unsigned long flags; |
1101 | u64 llp; |
1102 | int i; |
1103 | |
1104 | spin_lock_irqsave(&chan->vc.lock, flags); |
1105 | if (unlikely(axi_chan_is_hw_enable(chan))) { |
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
1107 | axi_chan_name(chan)); |
1108 | axi_chan_disable(chan); |
1109 | } |
1110 | |
	/* The completed descriptor is currently at the head of the vc list */
	vd = vchan_next_desc(&chan->vc);
1113 | if (!vd) { |
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
1115 | axi_chan_name(chan)); |
1116 | goto out; |
1117 | } |
1118 | |
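	/*
	 * Cyclic mode: locate the LLI the hardware will fetch next, re-arm
	 * the matched LLI by setting its VALID bit again, and invoke the
	 * period callback whenever a full period worth of blocks completed.
	 */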
1119 | if (chan->cyclic) { |
1120 | desc = vd_to_axi_desc(vd); |
1121 | if (desc) { |
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
1123 | for (i = 0; i < count; i++) { |
1124 | hw_desc = &desc->hw_desc[i]; |
1125 | if (hw_desc->llp == llp) { |
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
1127 | hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID; |
1128 | desc->completed_blocks = i; |
1129 | |
1130 | if (((hw_desc->len * (i + 1)) % desc->period_len) == 0) |
1131 | vchan_cyclic_callback(vd); |
1132 | break; |
1133 | } |
1134 | } |
1135 | |
1136 | axi_chan_enable(chan); |
1137 | } |
1138 | } else { |
1139 | /* Remove the completed descriptor from issued list before completing */ |
		list_del(&vd->node);
1141 | vchan_cookie_complete(vd); |
1142 | |
1143 | /* Submit queued descriptors after processing the completed ones */ |
1144 | axi_chan_start_first_queued(chan); |
1145 | } |
1146 | |
1147 | out: |
	spin_unlock_irqrestore(&chan->vc.lock, flags);
1149 | } |
1150 | |
1151 | static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id) |
1152 | { |
1153 | struct axi_dma_chip *chip = dev_id; |
1154 | struct dw_axi_dma *dw = chip->dw; |
1155 | struct axi_dma_chan *chan; |
1156 | |
1157 | u32 status, i; |
1158 | |
1159 | /* Disable DMAC interrupts. We'll enable them after processing channels */ |
1160 | axi_dma_irq_disable(chip); |
1161 | |
1162 | /* Poll, clear and process every channel interrupt status */ |
1163 | for (i = 0; i < dw->hdata->nr_channels; i++) { |
1164 | chan = &dw->chan[i]; |
1165 | status = axi_chan_irq_read(chan); |
		axi_chan_irq_clear(chan, status);
1167 | |
		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
1169 | axi_chan_name(chan), i, status); |
1170 | |
1171 | if (status & DWAXIDMAC_IRQ_ALL_ERR) |
1172 | axi_chan_handle_err(chan, status); |
1173 | else if (status & DWAXIDMAC_IRQ_DMA_TRF) |
1174 | axi_chan_block_xfer_complete(chan); |
1175 | } |
1176 | |
1177 | /* Re-enable interrupts */ |
1178 | axi_dma_irq_enable(chip); |
1179 | |
1180 | return IRQ_HANDLED; |
1181 | } |
1182 | |
1183 | static int dma_chan_terminate_all(struct dma_chan *dchan) |
1184 | { |
1185 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
1186 | u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT; |
1187 | unsigned long flags; |
1188 | u32 val; |
1189 | int ret; |
1190 | LIST_HEAD(head); |
1191 | |
1192 | axi_chan_disable(chan); |
1193 | |
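	/* Wait for the hardware to actually clear the channel-enable bit */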
1194 | ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val, |
1195 | !(val & chan_active), 1000, 50000); |
1196 | if (ret == -ETIMEDOUT) |
		dev_warn(dchan2dev(dchan),
			 "%s failed to stop\n", axi_chan_name(chan));
1199 | |
1200 | if (chan->direction != DMA_MEM_TO_MEM) |
		dw_axi_dma_set_hw_channel(chan, false);
1202 | if (chan->direction == DMA_MEM_TO_DEV) |
		dw_axi_dma_set_byte_halfword(chan, false);
1204 | |
1205 | spin_lock_irqsave(&chan->vc.lock, flags); |
1206 | |
	vchan_get_all_descriptors(&chan->vc, &head);
1208 | |
1209 | chan->cyclic = false; |
	spin_unlock_irqrestore(&chan->vc.lock, flags);
1211 | |
	vchan_dma_desc_free_list(&chan->vc, &head);
1213 | |
	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
1215 | |
1216 | return 0; |
1217 | } |
1218 | |
1219 | static int dma_chan_pause(struct dma_chan *dchan) |
1220 | { |
1221 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
1222 | unsigned long flags; |
1223 | unsigned int timeout = 20; /* timeout iterations */ |
1224 | u64 val; |
1225 | |
1226 | spin_lock_irqsave(&chan->vc.lock, flags); |
1227 | |
1228 | if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) { |
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
1230 | if (chan->id >= DMAC_CHAN_16) { |
1231 | val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16) |
1232 | << (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT) | |
1233 | (u64)(BIT(chan->id) >> DMAC_CHAN_16) |
1234 | << (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT); |
1235 | } else { |
1236 | val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT | |
1237 | BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT; |
1238 | } |
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
1243 | val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT | |
1244 | BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT; |
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
			val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
1251 | } |
1252 | } |
1253 | |
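	/* Poll (up to 20 x 2 us) for the channel to report the suspended state */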
1254 | do { |
1255 | if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED) |
1256 | break; |
1257 | |
1258 | udelay(2); |
1259 | } while (--timeout); |
1260 | |
	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);
1262 | |
1263 | chan->is_paused = true; |
1264 | |
	spin_unlock_irqrestore(&chan->vc.lock, flags);
1266 | |
1267 | return timeout ? 0 : -EAGAIN; |
1268 | } |
1269 | |
1270 | /* Called in chan locked context */ |
1271 | static inline void axi_chan_resume(struct axi_dma_chan *chan) |
1272 | { |
1273 | u64 val; |
1274 | |
1275 | if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) { |
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
1277 | if (chan->id >= DMAC_CHAN_16) { |
1278 | val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16) |
1279 | << (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT)); |
1280 | val |= ((u64)(BIT(chan->id) >> DMAC_CHAN_16) |
1281 | << (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT)); |
1282 | } else { |
1283 | val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT); |
1284 | val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT); |
1285 | } |
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
1290 | val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT); |
1291 | val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT); |
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
1295 | val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT); |
1296 | val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT); |
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
1298 | } |
1299 | } |
1300 | |
1301 | chan->is_paused = false; |
1302 | } |
1303 | |
1304 | static int dma_chan_resume(struct dma_chan *dchan) |
1305 | { |
1306 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); |
1307 | unsigned long flags; |
1308 | |
1309 | spin_lock_irqsave(&chan->vc.lock, flags); |
1310 | |
1311 | if (chan->is_paused) |
1312 | axi_chan_resume(chan); |
1313 | |
	spin_unlock_irqrestore(&chan->vc.lock, flags);
1315 | |
1316 | return 0; |
1317 | } |
1318 | |
1319 | static int axi_dma_suspend(struct axi_dma_chip *chip) |
1320 | { |
1321 | axi_dma_irq_disable(chip); |
1322 | axi_dma_disable(chip); |
1323 | |
	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);
1326 | |
1327 | return 0; |
1328 | } |
1329 | |
1330 | static int axi_dma_resume(struct axi_dma_chip *chip) |
1331 | { |
1332 | int ret; |
1333 | |
	ret = clk_prepare_enable(chip->cfgr_clk);
1335 | if (ret < 0) |
1336 | return ret; |
1337 | |
	ret = clk_prepare_enable(chip->core_clk);
1339 | if (ret < 0) |
1340 | return ret; |
1341 | |
1342 | axi_dma_enable(chip); |
1343 | axi_dma_irq_enable(chip); |
1344 | |
1345 | return 0; |
1346 | } |
1347 | |
1348 | static int __maybe_unused axi_dma_runtime_suspend(struct device *dev) |
1349 | { |
1350 | struct axi_dma_chip *chip = dev_get_drvdata(dev); |
1351 | |
1352 | return axi_dma_suspend(chip); |
1353 | } |
1354 | |
1355 | static int __maybe_unused axi_dma_runtime_resume(struct device *dev) |
1356 | { |
1357 | struct axi_dma_chip *chip = dev_get_drvdata(dev); |
1358 | |
1359 | return axi_dma_resume(chip); |
1360 | } |
1361 | |
1362 | static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec, |
1363 | struct of_dma *ofdma) |
1364 | { |
1365 | struct dw_axi_dma *dw = ofdma->of_dma_data; |
1366 | struct axi_dma_chan *chan; |
1367 | struct dma_chan *dchan; |
1368 | |
	dchan = dma_get_any_slave_channel(&dw->dma);
1370 | if (!dchan) |
1371 | return NULL; |
1372 | |
1373 | chan = dchan_to_axi_dma_chan(dchan); |
1374 | chan->hw_handshake_num = dma_spec->args[0]; |
1375 | return dchan; |
1376 | } |
1377 | |
1378 | static int parse_device_properties(struct axi_dma_chip *chip) |
1379 | { |
1380 | struct device *dev = chip->dev; |
1381 | u32 tmp, carr[DMAC_MAX_CHANNELS]; |
1382 | int ret; |
1383 | |
	ret = device_property_read_u32(dev, "dma-channels", &tmp);
1385 | if (ret) |
1386 | return ret; |
1387 | if (tmp == 0 || tmp > DMAC_MAX_CHANNELS) |
1388 | return -EINVAL; |
1389 | |
1390 | chip->dw->hdata->nr_channels = tmp; |
1391 | if (tmp <= DMA_REG_MAP_CH_REF) |
1392 | chip->dw->hdata->reg_map_8_channels = true; |
1393 | |
	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
1395 | if (ret) |
1396 | return ret; |
1397 | if (tmp == 0 || tmp > DMAC_MAX_MASTERS) |
1398 | return -EINVAL; |
1399 | |
1400 | chip->dw->hdata->nr_masters = tmp; |
1401 | |
	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
1403 | if (ret) |
1404 | return ret; |
1405 | if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX) |
1406 | return -EINVAL; |
1407 | |
1408 | chip->dw->hdata->m_data_width = tmp; |
1409 | |
	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
1412 | if (ret) |
1413 | return ret; |
1414 | for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) { |
1415 | if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE) |
1416 | return -EINVAL; |
1417 | |
1418 | chip->dw->hdata->block_size[tmp] = carr[tmp]; |
1419 | } |
1420 | |
	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
1423 | if (ret) |
1424 | return ret; |
1425 | /* Priority value must be programmed within [0:nr_channels-1] range */ |
1426 | for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) { |
1427 | if (carr[tmp] >= chip->dw->hdata->nr_channels) |
1428 | return -EINVAL; |
1429 | |
1430 | chip->dw->hdata->priority[tmp] = carr[tmp]; |
1431 | } |
1432 | |
	/* snps,axi-max-burst-len is an optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
1435 | if (!ret) { |
1436 | if (tmp > DWAXIDMAC_ARWLEN_MAX + 1) |
1437 | return -EINVAL; |
1438 | if (tmp < DWAXIDMAC_ARWLEN_MIN + 1) |
1439 | return -EINVAL; |
1440 | |
1441 | chip->dw->hdata->restrict_axi_burst_len = true; |
1442 | chip->dw->hdata->axi_rw_burst_len = tmp; |
1443 | } |
1444 | |
1445 | return 0; |
1446 | } |
1447 | |
1448 | static int dw_probe(struct platform_device *pdev) |
1449 | { |
1450 | struct axi_dma_chip *chip; |
1451 | struct dw_axi_dma *dw; |
1452 | struct dw_axi_dma_hcfg *hdata; |
1453 | struct reset_control *resets; |
1454 | unsigned int flags; |
1455 | u32 i; |
1456 | int ret; |
1457 | |
	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
1459 | if (!chip) |
1460 | return -ENOMEM; |
1461 | |
	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
1463 | if (!dw) |
1464 | return -ENOMEM; |
1465 | |
	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
1467 | if (!hdata) |
1468 | return -ENOMEM; |
1469 | |
1470 | chip->dw = dw; |
1471 | chip->dev = &pdev->dev; |
1472 | chip->dw->hdata = hdata; |
1473 | |
1474 | chip->irq = platform_get_irq(pdev, 0); |
1475 | if (chip->irq < 0) |
1476 | return chip->irq; |
1477 | |
	chip->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);
1481 | |
	flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
	if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(chip->apb_regs))
			return PTR_ERR(chip->apb_regs);
1487 | } |
1488 | |
1489 | if (flags & AXI_DMA_FLAG_HAS_RESETS) { |
		resets = devm_reset_control_array_get_exclusive(&pdev->dev);
		if (IS_ERR(resets))
			return PTR_ERR(resets);

		ret = reset_control_deassert(resets);
1495 | if (ret) |
1496 | return ret; |
1497 | } |
1498 | |
1499 | chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2); |
1500 | |
	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);
1508 | |
1509 | ret = parse_device_properties(chip); |
1510 | if (ret) |
1511 | return ret; |
1512 | |
	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
1515 | if (!dw->chan) |
1516 | return -ENOMEM; |
1517 | |
	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
			       IRQF_SHARED, KBUILD_MODNAME, chip);
1520 | if (ret) |
1521 | return ret; |
1522 | |
	INIT_LIST_HEAD(&dw->dma.channels);
1524 | for (i = 0; i < hdata->nr_channels; i++) { |
1525 | struct axi_dma_chan *chan = &dw->chan[i]; |
1526 | |
1527 | chan->chip = chip; |
1528 | chan->id = i; |
1529 | chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN; |
		atomic_set(&chan->descs_allocated, 0);
1531 | |
1532 | chan->vc.desc_free = vchan_desc_put; |
		vchan_init(&chan->vc, &dw->dma);
1534 | } |
1535 | |
1536 | /* Set capabilities */ |
1537 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); |
1538 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
1539 | dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask); |
1540 | |
1541 | /* DMA capabilities */ |
1542 | dw->dma.max_burst = hdata->axi_rw_burst_len; |
1543 | dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS; |
1544 | dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS; |
1545 | dw->dma.directions = BIT(DMA_MEM_TO_MEM); |
1546 | dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); |
1547 | dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
1548 | |
1549 | dw->dma.dev = chip->dev; |
1550 | dw->dma.device_tx_status = dma_chan_tx_status; |
1551 | dw->dma.device_issue_pending = dma_chan_issue_pending; |
1552 | dw->dma.device_terminate_all = dma_chan_terminate_all; |
1553 | dw->dma.device_pause = dma_chan_pause; |
1554 | dw->dma.device_resume = dma_chan_resume; |
1555 | |
1556 | dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources; |
1557 | dw->dma.device_free_chan_resources = dma_chan_free_chan_resources; |
1558 | |
1559 | dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy; |
1560 | dw->dma.device_synchronize = dw_axi_dma_synchronize; |
1561 | dw->dma.device_config = dw_axi_dma_chan_slave_config; |
1562 | dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg; |
1563 | dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic; |
1564 | |
1565 | /* |
1566 | * Synopsis DesignWare AxiDMA datasheet mentioned Maximum |
1567 | * supported blocks is 1024. Device register width is 4 bytes. |
1568 | * Therefore, set constraint to 1024 * 4. |
1569 | */ |
	dw->dma.dev->dma_parms = &dw->dma_parms;
	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
	platform_set_drvdata(pdev, chip);
1573 | |
	pm_runtime_enable(chip->dev);
1575 | |
1576 | /* |
1577 | * We can't just call pm_runtime_get here instead of |
1578 | * pm_runtime_get_noresume + axi_dma_resume because we need |
1579 | * driver to work also without Runtime PM. |
1580 | */ |
	pm_runtime_get_noresume(chip->dev);
1582 | ret = axi_dma_resume(chip); |
1583 | if (ret < 0) |
1584 | goto err_pm_disable; |
1585 | |
1586 | axi_dma_hw_init(chip); |
1587 | |
	pm_runtime_put(chip->dev);
1589 | |
	ret = dmaenginem_async_device_register(&dw->dma);
1591 | if (ret) |
1592 | goto err_pm_disable; |
1593 | |
1594 | /* Register with OF helpers for DMA lookups */ |
	ret = of_dma_controller_register(pdev->dev.of_node,
					 dw_axi_dma_of_xlate, dw);
	if (ret < 0)
		dev_warn(&pdev->dev,
			 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");
1600 | |
	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
1602 | dw->hdata->nr_channels); |
1603 | |
1604 | return 0; |
1605 | |
1606 | err_pm_disable: |
	pm_runtime_disable(chip->dev);
1608 | |
1609 | return ret; |
1610 | } |
1611 | |
1612 | static void dw_remove(struct platform_device *pdev) |
1613 | { |
1614 | struct axi_dma_chip *chip = platform_get_drvdata(pdev); |
1615 | struct dw_axi_dma *dw = chip->dw; |
1616 | struct axi_dma_chan *chan, *_chan; |
1617 | u32 i; |
1618 | |
	/* Enable clocks before accessing the registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
1622 | axi_dma_irq_disable(chip); |
1623 | for (i = 0; i < dw->hdata->nr_channels; i++) { |
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
1626 | } |
1627 | axi_dma_disable(chip); |
1628 | |
	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);
1631 | |
	devm_free_irq(chip->dev, chip->irq, chip);
1633 | |
	of_dma_controller_free(chip->dev->of_node);
1635 | |
1636 | list_for_each_entry_safe(chan, _chan, &dw->dma.channels, |
1637 | vc.chan.device_node) { |
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
1640 | } |
1641 | } |
1642 | |
1643 | static const struct dev_pm_ops dw_axi_dma_pm_ops = { |
1644 | SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL) |
1645 | }; |
1646 | |
1647 | static const struct of_device_id dw_dma_of_id_table[] = { |
1648 | { |
1649 | .compatible = "snps,axi-dma-1.01a" |
1650 | }, { |
1651 | .compatible = "intel,kmb-axi-dma" , |
1652 | .data = (void *)AXI_DMA_FLAG_HAS_APB_REGS, |
1653 | }, { |
1654 | .compatible = "starfive,jh7110-axi-dma" , |
1655 | .data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2), |
1656 | }, |
1657 | {} |
1658 | }; |
1659 | MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); |
1660 | |
1661 | static struct platform_driver dw_driver = { |
1662 | .probe = dw_probe, |
1663 | .remove_new = dw_remove, |
1664 | .driver = { |
1665 | .name = KBUILD_MODNAME, |
1666 | .of_match_table = dw_dma_of_id_table, |
1667 | .pm = &dw_axi_dma_pm_ops, |
1668 | }, |
1669 | }; |
1670 | module_platform_driver(dw_driver); |
1671 | |
1672 | MODULE_LICENSE("GPL v2" ); |
1673 | MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver" ); |
1674 | MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>" ); |
1675 | |