1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com |
4 | * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> |
5 | */ |
6 | |
7 | #include <linux/kernel.h> |
8 | #include <linux/module.h> |
9 | #include <linux/delay.h> |
10 | #include <linux/dmaengine.h> |
11 | #include <linux/dma-mapping.h> |
12 | #include <linux/dmapool.h> |
13 | #include <linux/err.h> |
14 | #include <linux/init.h> |
15 | #include <linux/interrupt.h> |
16 | #include <linux/list.h> |
17 | #include <linux/platform_device.h> |
18 | #include <linux/slab.h> |
19 | #include <linux/spinlock.h> |
20 | #include <linux/sys_soc.h> |
21 | #include <linux/of.h> |
22 | #include <linux/of_dma.h> |
23 | #include <linux/of_irq.h> |
24 | #include <linux/workqueue.h> |
25 | #include <linux/completion.h> |
26 | #include <linux/soc/ti/k3-ringacc.h> |
27 | #include <linux/soc/ti/ti_sci_protocol.h> |
28 | #include <linux/soc/ti/ti_sci_inta_msi.h> |
29 | #include <linux/dma/k3-event-router.h> |
30 | #include <linux/dma/ti-cppi5.h> |
31 | |
32 | #include "../virt-dma.h" |
33 | #include "k3-udma.h" |
34 | #include "k3-psil-priv.h" |
35 | |
36 | struct udma_static_tr { |
37 | u8 elsize; /* RPSTR0 */ |
38 | u16 elcnt; /* RPSTR0 */ |
39 | u16 bstcnt; /* RPSTR1 */ |
40 | }; |
41 | |
42 | #define K3_UDMA_MAX_RFLOWS 1024 |
43 | #define K3_UDMA_DEFAULT_RING_SIZE 16 |
44 | |
45 | /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */ |
46 | #define UDMA_RFLOW_SRCTAG_NONE 0 |
47 | #define UDMA_RFLOW_SRCTAG_CFG_TAG 1 |
48 | #define UDMA_RFLOW_SRCTAG_FLOW_ID 2 |
49 | #define UDMA_RFLOW_SRCTAG_SRC_TAG 4 |
50 | |
51 | #define UDMA_RFLOW_DSTTAG_NONE 0 |
52 | #define UDMA_RFLOW_DSTTAG_CFG_TAG 1 |
53 | #define UDMA_RFLOW_DSTTAG_FLOW_ID 2 |
54 | #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4 |
55 | #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5 |
56 | |
57 | struct udma_chan; |
58 | |
59 | enum k3_dma_type { |
60 | DMA_TYPE_UDMA = 0, |
61 | DMA_TYPE_BCDMA, |
62 | DMA_TYPE_PKTDMA, |
63 | }; |
64 | |
65 | enum udma_mmr { |
66 | MMR_GCFG = 0, |
67 | MMR_BCHANRT, |
68 | MMR_RCHANRT, |
69 | MMR_TCHANRT, |
70 | MMR_LAST, |
71 | }; |
72 | |
73 | static const char * const mmr_names[] = { |
74 | [MMR_GCFG] = "gcfg" , |
75 | [MMR_BCHANRT] = "bchanrt" , |
76 | [MMR_RCHANRT] = "rchanrt" , |
77 | [MMR_TCHANRT] = "tchanrt" , |
78 | }; |
79 | |
80 | struct udma_tchan { |
81 | void __iomem *reg_rt; |
82 | |
83 | int id; |
84 | struct k3_ring *t_ring; /* Transmit ring */ |
85 | struct k3_ring *tc_ring; /* Transmit Completion ring */ |
	int tflow_id; /* applicable only for PKTDMA */
};
89 | |
90 | #define udma_bchan udma_tchan |
91 | |
92 | struct udma_rflow { |
93 | int id; |
94 | struct k3_ring *fd_ring; /* Free Descriptor ring */ |
95 | struct k3_ring *r_ring; /* Receive ring */ |
96 | }; |
97 | |
98 | struct udma_rchan { |
99 | void __iomem *reg_rt; |
100 | |
101 | int id; |
102 | }; |
103 | |
104 | struct udma_oes_offsets { |
105 | /* K3 UDMA Output Event Offset */ |
106 | u32 udma_rchan; |
107 | |
108 | /* BCDMA Output Event Offsets */ |
109 | u32 bcdma_bchan_data; |
110 | u32 bcdma_bchan_ring; |
111 | u32 bcdma_tchan_data; |
112 | u32 bcdma_tchan_ring; |
113 | u32 bcdma_rchan_data; |
114 | u32 bcdma_rchan_ring; |
115 | |
116 | /* PKTDMA Output Event Offsets */ |
117 | u32 pktdma_tchan_flow; |
118 | u32 pktdma_rchan_flow; |
119 | }; |
120 | |
121 | #define UDMA_FLAG_PDMA_ACC32 BIT(0) |
122 | #define UDMA_FLAG_PDMA_BURST BIT(1) |
123 | #define UDMA_FLAG_TDTYPE BIT(2) |
124 | #define UDMA_FLAG_BURST_SIZE BIT(3) |
125 | #define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \ |
126 | UDMA_FLAG_PDMA_BURST | \ |
127 | UDMA_FLAG_TDTYPE | \ |
128 | UDMA_FLAG_BURST_SIZE) |
129 | |
130 | struct udma_match_data { |
131 | enum k3_dma_type type; |
132 | u32 psil_base; |
133 | bool enable_memcpy_support; |
134 | u32 flags; |
135 | u32 statictr_z_mask; |
136 | u8 burst_size[3]; |
137 | struct udma_soc_data *soc_data; |
138 | }; |
139 | |
140 | struct udma_soc_data { |
141 | struct udma_oes_offsets oes; |
142 | u32 bcdma_trigger_event_offset; |
143 | }; |
144 | |
145 | struct udma_hwdesc { |
146 | size_t cppi5_desc_size; |
147 | void *cppi5_desc_vaddr; |
148 | dma_addr_t cppi5_desc_paddr; |
149 | |
150 | /* TR descriptor internal pointers */ |
151 | void *tr_req_base; |
152 | struct cppi5_tr_resp_t *tr_resp_base; |
153 | }; |
154 | |
155 | struct udma_rx_flush { |
156 | struct udma_hwdesc hwdescs[2]; |
157 | |
158 | size_t buffer_size; |
159 | void *buffer_vaddr; |
160 | dma_addr_t buffer_paddr; |
161 | }; |
162 | |
163 | struct udma_tpl { |
164 | u8 levels; |
165 | u32 start_idx[3]; |
166 | }; |
167 | |
168 | struct udma_dev { |
169 | struct dma_device ddev; |
170 | struct device *dev; |
171 | void __iomem *mmrs[MMR_LAST]; |
172 | const struct udma_match_data *match_data; |
173 | const struct udma_soc_data *soc_data; |
174 | |
175 | struct udma_tpl bchan_tpl; |
176 | struct udma_tpl tchan_tpl; |
177 | struct udma_tpl rchan_tpl; |
178 | |
179 | size_t desc_align; /* alignment to use for descriptors */ |
180 | |
181 | struct udma_tisci_rm tisci_rm; |
182 | |
183 | struct k3_ringacc *ringacc; |
184 | |
185 | struct work_struct purge_work; |
186 | struct list_head desc_to_purge; |
187 | spinlock_t lock; |
188 | |
189 | struct udma_rx_flush rx_flush; |
190 | |
191 | int bchan_cnt; |
192 | int tchan_cnt; |
193 | int echan_cnt; |
194 | int rchan_cnt; |
195 | int rflow_cnt; |
196 | int tflow_cnt; |
197 | unsigned long *bchan_map; |
198 | unsigned long *tchan_map; |
199 | unsigned long *rchan_map; |
200 | unsigned long *rflow_gp_map; |
201 | unsigned long *rflow_gp_map_allocated; |
202 | unsigned long *rflow_in_use; |
203 | unsigned long *tflow_map; |
204 | |
205 | struct udma_bchan *bchans; |
206 | struct udma_tchan *tchans; |
207 | struct udma_rchan *rchans; |
208 | struct udma_rflow *rflows; |
209 | |
210 | struct udma_chan *channels; |
211 | u32 psil_base; |
212 | u32 atype; |
213 | u32 asel; |
214 | }; |
215 | |
216 | struct udma_desc { |
217 | struct virt_dma_desc vd; |
218 | |
219 | bool terminated; |
220 | |
221 | enum dma_transfer_direction dir; |
222 | |
223 | struct udma_static_tr static_tr; |
224 | u32 residue; |
225 | |
226 | unsigned int sglen; |
227 | unsigned int desc_idx; /* Only used for cyclic in packet mode */ |
228 | unsigned int tr_idx; |
229 | |
230 | u32 metadata_size; |
231 | void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ |
232 | |
233 | unsigned int hwdesc_count; |
234 | struct udma_hwdesc hwdesc[]; |
235 | }; |
236 | |
237 | enum udma_chan_state { |
238 | UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */ |
239 | UDMA_CHAN_IS_ACTIVE, /* Normal operation */ |
240 | UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */ |
241 | }; |
242 | |
243 | struct udma_tx_drain { |
244 | struct delayed_work work; |
245 | ktime_t tstamp; |
246 | u32 residue; |
247 | }; |
248 | |
249 | struct udma_chan_config { |
250 | bool pkt_mode; /* TR or packet */ |
251 | bool needs_epib; /* EPIB is needed for the communication or not */ |
252 | u32 psd_size; /* size of Protocol Specific Data */ |
253 | u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */ |
254 | u32 hdesc_size; /* Size of a packet descriptor in packet mode */ |
255 | bool notdpkt; /* Suppress sending TDC packet */ |
256 | int remote_thread_id; |
257 | u32 atype; |
258 | u32 asel; |
259 | u32 src_thread; |
260 | u32 dst_thread; |
261 | enum psil_endpoint_type ep_type; |
262 | bool enable_acc32; |
263 | bool enable_burst; |
264 | enum udma_tp_level channel_tpl; /* Channel Throughput Level */ |
265 | |
266 | u32 tr_trigger_type; |
267 | unsigned long tx_flags; |
268 | |
	/* PKTDMA mapped channel */
270 | int mapped_channel_id; |
271 | /* PKTDMA default tflow or rflow for mapped channel */ |
272 | int default_flow_id; |
273 | |
274 | enum dma_transfer_direction dir; |
275 | }; |
276 | |
277 | struct udma_chan { |
278 | struct virt_dma_chan vc; |
279 | struct dma_slave_config cfg; |
280 | struct udma_dev *ud; |
281 | struct device *dma_dev; |
282 | struct udma_desc *desc; |
283 | struct udma_desc *terminated_desc; |
284 | struct udma_static_tr static_tr; |
285 | char *name; |
286 | |
287 | struct udma_bchan *bchan; |
288 | struct udma_tchan *tchan; |
289 | struct udma_rchan *rchan; |
290 | struct udma_rflow *rflow; |
291 | |
292 | bool psil_paired; |
293 | |
294 | int irq_num_ring; |
295 | int irq_num_udma; |
296 | |
297 | bool cyclic; |
298 | bool paused; |
299 | |
300 | enum udma_chan_state state; |
301 | struct completion teardown_completed; |
302 | |
303 | struct udma_tx_drain tx_drain; |
304 | |
305 | /* Channel configuration parameters */ |
306 | struct udma_chan_config config; |
307 | /* Channel configuration parameters (backup) */ |
308 | struct udma_chan_config backup_config; |
309 | |
310 | /* dmapool for packet mode descriptors */ |
311 | bool use_dma_pool; |
312 | struct dma_pool *hdesc_pool; |
313 | |
314 | u32 id; |
315 | }; |
316 | |
317 | static inline struct udma_dev *to_udma_dev(struct dma_device *d) |
318 | { |
319 | return container_of(d, struct udma_dev, ddev); |
320 | } |
321 | |
322 | static inline struct udma_chan *to_udma_chan(struct dma_chan *c) |
323 | { |
324 | return container_of(c, struct udma_chan, vc.chan); |
325 | } |
326 | |
327 | static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t) |
328 | { |
329 | return container_of(t, struct udma_desc, vd.tx); |
330 | } |
331 | |
332 | /* Generic register access functions */ |
333 | static inline u32 udma_read(void __iomem *base, int reg) |
334 | { |
	return readl(base + reg);
336 | } |
337 | |
338 | static inline void udma_write(void __iomem *base, int reg, u32 val) |
339 | { |
	writel(val, base + reg);
341 | } |
342 | |
343 | static inline void udma_update_bits(void __iomem *base, int reg, |
344 | u32 mask, u32 val) |
345 | { |
346 | u32 tmp, orig; |
347 | |
	orig = readl(base + reg);
349 | tmp = orig & ~mask; |
350 | tmp |= (val & mask); |
351 | |
352 | if (tmp != orig) |
		writel(tmp, base + reg);
354 | } |
355 | |
356 | /* TCHANRT */ |
357 | static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg) |
358 | { |
359 | if (!uc->tchan) |
360 | return 0; |
	return udma_read(uc->tchan->reg_rt, reg);
362 | } |
363 | |
364 | static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val) |
365 | { |
366 | if (!uc->tchan) |
367 | return; |
	udma_write(uc->tchan->reg_rt, reg, val);
369 | } |
370 | |
371 | static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg, |
372 | u32 mask, u32 val) |
373 | { |
374 | if (!uc->tchan) |
375 | return; |
	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
377 | } |
378 | |
379 | /* RCHANRT */ |
380 | static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg) |
381 | { |
382 | if (!uc->rchan) |
383 | return 0; |
	return udma_read(uc->rchan->reg_rt, reg);
385 | } |
386 | |
387 | static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val) |
388 | { |
389 | if (!uc->rchan) |
390 | return; |
	udma_write(uc->rchan->reg_rt, reg, val);
392 | } |
393 | |
394 | static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg, |
395 | u32 mask, u32 val) |
396 | { |
397 | if (!uc->rchan) |
398 | return; |
	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
400 | } |
401 | |
402 | static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) |
403 | { |
404 | struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; |
405 | |
406 | dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; |
407 | return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, |
408 | tisci_rm->tisci_navss_dev_id, |
409 | src_thread, dst_thread); |
410 | } |
411 | |
412 | static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread, |
413 | u32 dst_thread) |
414 | { |
415 | struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; |
416 | |
417 | dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; |
418 | return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, |
419 | tisci_rm->tisci_navss_dev_id, |
420 | src_thread, dst_thread); |
421 | } |
422 | |
423 | static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel) |
424 | { |
425 | struct device *chan_dev = &chan->dev->device; |
426 | |
427 | if (asel == 0) { |
428 | /* No special handling for the channel */ |
429 | chan->dev->chan_dma_dev = false; |
430 | |
431 | chan_dev->dma_coherent = false; |
432 | chan_dev->dma_parms = NULL; |
433 | } else if (asel == 14 || asel == 15) { |
434 | chan->dev->chan_dma_dev = true; |
435 | |
436 | chan_dev->dma_coherent = true; |
		dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
438 | chan_dev->dma_parms = chan_dev->parent->dma_parms; |
439 | } else { |
		dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
441 | |
442 | chan_dev->dma_coherent = false; |
443 | chan_dev->dma_parms = NULL; |
444 | } |
445 | } |
446 | |
447 | static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id) |
448 | { |
449 | int i; |
450 | |
451 | for (i = 0; i < tpl_map->levels; i++) { |
452 | if (chan_id >= tpl_map->start_idx[i]) |
453 | return i; |
454 | } |
455 | |
456 | return 0; |
457 | } |
458 | |
459 | static void udma_reset_uchan(struct udma_chan *uc) |
460 | { |
461 | memset(&uc->config, 0, sizeof(uc->config)); |
462 | uc->config.remote_thread_id = -1; |
463 | uc->config.mapped_channel_id = -1; |
464 | uc->config.default_flow_id = -1; |
465 | uc->state = UDMA_CHAN_IS_IDLE; |
466 | } |
467 | |
468 | static void udma_dump_chan_stdata(struct udma_chan *uc) |
469 | { |
470 | struct device *dev = uc->ud->dev; |
471 | u32 offset; |
472 | int i; |
473 | |
474 | if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { |
475 | dev_dbg(dev, "TCHAN State data:\n" ); |
476 | for (i = 0; i < 32; i++) { |
477 | offset = UDMA_CHAN_RT_STDATA_REG + i * 4; |
478 | dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n" , i, |
479 | udma_tchanrt_read(uc, offset)); |
480 | } |
481 | } |
482 | |
483 | if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { |
484 | dev_dbg(dev, "RCHAN State data:\n" ); |
485 | for (i = 0; i < 32; i++) { |
486 | offset = UDMA_CHAN_RT_STDATA_REG + i * 4; |
487 | dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n" , i, |
488 | udma_rchanrt_read(uc, offset)); |
489 | } |
490 | } |
491 | } |
492 | |
493 | static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d, |
494 | int idx) |
495 | { |
496 | return d->hwdesc[idx].cppi5_desc_paddr; |
497 | } |
498 | |
499 | static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx) |
500 | { |
501 | return d->hwdesc[idx].cppi5_desc_vaddr; |
502 | } |
503 | |
504 | static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc, |
505 | dma_addr_t paddr) |
506 | { |
507 | struct udma_desc *d = uc->terminated_desc; |
508 | |
509 | if (d) { |
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);
512 | |
513 | if (desc_paddr != paddr) |
514 | d = NULL; |
515 | } |
516 | |
517 | if (!d) { |
518 | d = uc->desc; |
519 | if (d) { |
			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								d->desc_idx);
522 | |
523 | if (desc_paddr != paddr) |
524 | d = NULL; |
525 | } |
526 | } |
527 | |
528 | return d; |
529 | } |
530 | |
531 | static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d) |
532 | { |
533 | if (uc->use_dma_pool) { |
534 | int i; |
535 | |
536 | for (i = 0; i < d->hwdesc_count; i++) { |
537 | if (!d->hwdesc[i].cppi5_desc_vaddr) |
538 | continue; |
539 | |
			dma_pool_free(uc->hdesc_pool,
				      d->hwdesc[i].cppi5_desc_vaddr,
				      d->hwdesc[i].cppi5_desc_paddr);
543 | |
544 | d->hwdesc[i].cppi5_desc_vaddr = NULL; |
545 | } |
546 | } else if (d->hwdesc[0].cppi5_desc_vaddr) { |
		dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
				  d->hwdesc[0].cppi5_desc_vaddr,
				  d->hwdesc[0].cppi5_desc_paddr);
550 | |
551 | d->hwdesc[0].cppi5_desc_vaddr = NULL; |
552 | } |
553 | } |
554 | |
555 | static void udma_purge_desc_work(struct work_struct *work) |
556 | { |
557 | struct udma_dev *ud = container_of(work, typeof(*ud), purge_work); |
558 | struct virt_dma_desc *vd, *_vd; |
559 | unsigned long flags; |
560 | LIST_HEAD(head); |
561 | |
562 | spin_lock_irqsave(&ud->lock, flags); |
	list_splice_tail_init(&ud->desc_to_purge, &head);
	spin_unlock_irqrestore(&ud->lock, flags);
565 | |
566 | list_for_each_entry_safe(vd, _vd, &head, node) { |
		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
		struct udma_desc *d = to_udma_desc(&vd->tx);
569 | |
570 | udma_free_hwdesc(uc, d); |
		list_del(&vd->node);
		kfree(d);
573 | } |
574 | |
575 | /* If more to purge, schedule the work again */ |
	if (!list_empty(&ud->desc_to_purge))
		schedule_work(&ud->purge_work);
578 | } |
579 | |
580 | static void udma_desc_free(struct virt_dma_desc *vd) |
581 | { |
	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
	struct udma_desc *d = to_udma_desc(&vd->tx);
585 | unsigned long flags; |
586 | |
587 | if (uc->terminated_desc == d) |
588 | uc->terminated_desc = NULL; |
589 | |
590 | if (uc->use_dma_pool) { |
591 | udma_free_hwdesc(uc, d); |
		kfree(d);
593 | return; |
594 | } |
595 | |
596 | spin_lock_irqsave(&ud->lock, flags); |
	list_add_tail(&vd->node, &ud->desc_to_purge);
	spin_unlock_irqrestore(&ud->lock, flags);
599 | |
	schedule_work(&ud->purge_work);
601 | } |
602 | |
603 | static bool udma_is_chan_running(struct udma_chan *uc) |
604 | { |
605 | u32 trt_ctl = 0; |
606 | u32 rrt_ctl = 0; |
607 | |
608 | if (uc->tchan) |
609 | trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); |
610 | if (uc->rchan) |
611 | rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); |
612 | |
613 | if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) |
614 | return true; |
615 | |
616 | return false; |
617 | } |
618 | |
619 | static bool udma_is_chan_paused(struct udma_chan *uc) |
620 | { |
621 | u32 val, pause_mask; |
622 | |
623 | switch (uc->config.dir) { |
624 | case DMA_DEV_TO_MEM: |
625 | val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); |
626 | pause_mask = UDMA_PEER_RT_EN_PAUSE; |
627 | break; |
628 | case DMA_MEM_TO_DEV: |
629 | val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); |
630 | pause_mask = UDMA_PEER_RT_EN_PAUSE; |
631 | break; |
632 | case DMA_MEM_TO_MEM: |
633 | val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); |
634 | pause_mask = UDMA_CHAN_RT_CTL_PAUSE; |
635 | break; |
636 | default: |
637 | return false; |
638 | } |
639 | |
640 | if (val & pause_mask) |
641 | return true; |
642 | |
643 | return false; |
644 | } |
645 | |
646 | static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc) |
647 | { |
648 | return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; |
649 | } |
650 | |
651 | static int udma_push_to_ring(struct udma_chan *uc, int idx) |
652 | { |
653 | struct udma_desc *d = uc->desc; |
654 | struct k3_ring *ring = NULL; |
655 | dma_addr_t paddr; |
656 | |
657 | switch (uc->config.dir) { |
658 | case DMA_DEV_TO_MEM: |
659 | ring = uc->rflow->fd_ring; |
660 | break; |
661 | case DMA_MEM_TO_DEV: |
662 | case DMA_MEM_TO_MEM: |
663 | ring = uc->tchan->t_ring; |
664 | break; |
665 | default: |
666 | return -EINVAL; |
667 | } |
668 | |
669 | /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ |
670 | if (idx == -1) { |
671 | paddr = udma_get_rx_flush_hwdesc_paddr(uc); |
672 | } else { |
673 | paddr = udma_curr_cppi5_desc_paddr(d, idx); |
674 | |
675 | wmb(); /* Ensure that writes are not moved over this point */ |
676 | } |
677 | |
	return k3_ringacc_ring_push(ring, &paddr);
679 | } |
680 | |
681 | static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr) |
682 | { |
683 | if (uc->config.dir != DMA_DEV_TO_MEM) |
684 | return false; |
685 | |
686 | if (addr == udma_get_rx_flush_hwdesc_paddr(uc)) |
687 | return true; |
688 | |
689 | return false; |
690 | } |
691 | |
692 | static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) |
693 | { |
694 | struct k3_ring *ring = NULL; |
695 | int ret; |
696 | |
697 | switch (uc->config.dir) { |
698 | case DMA_DEV_TO_MEM: |
699 | ring = uc->rflow->r_ring; |
700 | break; |
701 | case DMA_MEM_TO_DEV: |
702 | case DMA_MEM_TO_MEM: |
703 | ring = uc->tchan->tc_ring; |
704 | break; |
705 | default: |
706 | return -ENOENT; |
707 | } |
708 | |
	ret = k3_ringacc_ring_pop(ring, addr);
710 | if (ret) |
711 | return ret; |
712 | |
713 | rmb(); /* Ensure that reads are not moved before this point */ |
714 | |
715 | /* Teardown completion */ |
	if (cppi5_desc_is_tdcm(*addr))
717 | return 0; |
718 | |
719 | /* Check for flush descriptor */ |
	if (udma_desc_is_rx_flush(uc, *addr))
721 | return -ENOENT; |
722 | |
723 | return 0; |
724 | } |
725 | |
726 | static void udma_reset_rings(struct udma_chan *uc) |
727 | { |
728 | struct k3_ring *ring1 = NULL; |
729 | struct k3_ring *ring2 = NULL; |
730 | |
731 | switch (uc->config.dir) { |
732 | case DMA_DEV_TO_MEM: |
733 | if (uc->rchan) { |
734 | ring1 = uc->rflow->fd_ring; |
735 | ring2 = uc->rflow->r_ring; |
736 | } |
737 | break; |
738 | case DMA_MEM_TO_DEV: |
739 | case DMA_MEM_TO_MEM: |
740 | if (uc->tchan) { |
741 | ring1 = uc->tchan->t_ring; |
742 | ring2 = uc->tchan->tc_ring; |
743 | } |
744 | break; |
745 | default: |
746 | break; |
747 | } |
748 | |
749 | if (ring1) |
		k3_ringacc_ring_reset_dma(ring1,
					  k3_ringacc_ring_get_occ(ring1));
752 | if (ring2) |
		k3_ringacc_ring_reset(ring2);
754 | |
	/* make sure we are not leaking memory through a stalled descriptor */
756 | if (uc->terminated_desc) { |
		udma_desc_free(&uc->terminated_desc->vd);
758 | uc->terminated_desc = NULL; |
759 | } |
760 | } |
761 | |
762 | static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val) |
763 | { |
764 | if (uc->desc->dir == DMA_DEV_TO_MEM) { |
765 | udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); |
766 | udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); |
767 | if (uc->config.ep_type != PSIL_EP_NATIVE) |
768 | udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); |
769 | } else { |
770 | udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); |
771 | udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); |
772 | if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE) |
773 | udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); |
774 | } |
775 | } |
776 | |
777 | static void udma_reset_counters(struct udma_chan *uc) |
778 | { |
779 | u32 val; |
780 | |
781 | if (uc->tchan) { |
782 | val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); |
783 | udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); |
784 | |
785 | val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); |
786 | udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); |
787 | |
788 | val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); |
789 | udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); |
790 | |
791 | if (!uc->bchan) { |
792 | val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); |
793 | udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); |
794 | } |
795 | } |
796 | |
797 | if (uc->rchan) { |
798 | val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); |
799 | udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); |
800 | |
801 | val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); |
802 | udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); |
803 | |
804 | val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); |
805 | udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); |
806 | |
807 | val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); |
808 | udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); |
809 | } |
810 | } |
811 | |
812 | static int udma_reset_chan(struct udma_chan *uc, bool hard) |
813 | { |
814 | switch (uc->config.dir) { |
815 | case DMA_DEV_TO_MEM: |
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
826 | break; |
827 | default: |
828 | return -EINVAL; |
829 | } |
830 | |
831 | /* Reset all counters */ |
832 | udma_reset_counters(uc); |
833 | |
	/* Hard reset: re-initialize the channel to recover it */
835 | if (hard) { |
836 | struct udma_chan_config ucc_backup; |
837 | int ret; |
838 | |
839 | memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); |
840 | uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); |
841 | |
842 | /* restore the channel configuration */ |
843 | memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); |
844 | ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); |
845 | if (ret) |
846 | return ret; |
847 | |
848 | /* |
		 * Setting forced teardown after a forced reset helps to
		 * recover the rchan.
851 | */ |
852 | if (uc->config.dir == DMA_DEV_TO_MEM) |
853 | udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, |
854 | UDMA_CHAN_RT_CTL_EN | |
855 | UDMA_CHAN_RT_CTL_TDOWN | |
856 | UDMA_CHAN_RT_CTL_FTDOWN); |
857 | } |
858 | uc->state = UDMA_CHAN_IS_IDLE; |
859 | |
860 | return 0; |
861 | } |
862 | |
863 | static void udma_start_desc(struct udma_chan *uc) |
864 | { |
865 | struct udma_chan_config *ucc = &uc->config; |
866 | |
867 | if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode && |
868 | (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { |
869 | int i; |
870 | |
871 | /* |
872 | * UDMA only: Push all descriptors to ring for packet mode |
873 | * cyclic or RX |
		 * PKTDMA supports pre-linked descriptors and cyclic is not
		 * supported
876 | */ |
877 | for (i = 0; i < uc->desc->sglen; i++) |
			udma_push_to_ring(uc, i);
	} else {
		udma_push_to_ring(uc, 0);
881 | } |
882 | } |
883 | |
884 | static bool udma_chan_needs_reconfiguration(struct udma_chan *uc) |
885 | { |
886 | /* Only PDMAs have staticTR */ |
887 | if (uc->config.ep_type == PSIL_EP_NATIVE) |
888 | return false; |
889 | |
890 | /* Check if the staticTR configuration has changed for TX */ |
	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
892 | return true; |
893 | |
894 | return false; |
895 | } |
896 | |
897 | static int udma_start(struct udma_chan *uc) |
898 | { |
	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
900 | |
901 | if (!vd) { |
902 | uc->desc = NULL; |
903 | return -ENOENT; |
904 | } |
905 | |
	list_del(&vd->node);
907 | |
	uc->desc = to_udma_desc(&vd->tx);
909 | |
910 | /* Channel is already running and does not need reconfiguration */ |
911 | if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) { |
912 | udma_start_desc(uc); |
913 | goto out; |
914 | } |
915 | |
916 | /* Make sure that we clear the teardown bit, if it is set */ |
	udma_reset_chan(uc, false);
918 | |
919 | /* Push descriptors before we start the channel */ |
920 | udma_start_desc(uc); |
921 | |
922 | switch (uc->desc->dir) { |
923 | case DMA_DEV_TO_MEM: |
924 | /* Config remote TR */ |
925 | if (uc->config.ep_type == PSIL_EP_PDMA_XY) { |
926 | u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | |
927 | PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); |
928 | const struct udma_match_data *match_data = |
929 | uc->ud->match_data; |
930 | |
931 | if (uc->config.enable_acc32) |
932 | val |= PDMA_STATIC_TR_XY_ACC32; |
933 | if (uc->config.enable_burst) |
934 | val |= PDMA_STATIC_TR_XY_BURST; |
935 | |
936 | udma_rchanrt_write(uc, |
937 | UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, |
938 | val); |
939 | |
940 | udma_rchanrt_write(uc, |
941 | UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG, |
942 | PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, |
943 | match_data->statictr_z_mask)); |
944 | |
945 | /* save the current staticTR configuration */ |
946 | memcpy(&uc->static_tr, &uc->desc->static_tr, |
947 | sizeof(uc->static_tr)); |
948 | } |
949 | |
950 | udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, |
951 | UDMA_CHAN_RT_CTL_EN); |
952 | |
953 | /* Enable remote */ |
954 | udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, |
955 | UDMA_PEER_RT_EN_ENABLE); |
956 | |
957 | break; |
958 | case DMA_MEM_TO_DEV: |
959 | /* Config remote TR */ |
960 | if (uc->config.ep_type == PSIL_EP_PDMA_XY) { |
961 | u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | |
962 | PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); |
963 | |
964 | if (uc->config.enable_acc32) |
965 | val |= PDMA_STATIC_TR_XY_ACC32; |
966 | if (uc->config.enable_burst) |
967 | val |= PDMA_STATIC_TR_XY_BURST; |
968 | |
969 | udma_tchanrt_write(uc, |
970 | UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, |
971 | val); |
972 | |
973 | /* save the current staticTR configuration */ |
974 | memcpy(&uc->static_tr, &uc->desc->static_tr, |
975 | sizeof(uc->static_tr)); |
976 | } |
977 | |
978 | /* Enable remote */ |
979 | udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, |
980 | UDMA_PEER_RT_EN_ENABLE); |
981 | |
982 | udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, |
983 | UDMA_CHAN_RT_CTL_EN); |
984 | |
985 | break; |
986 | case DMA_MEM_TO_MEM: |
987 | udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, |
988 | UDMA_CHAN_RT_CTL_EN); |
989 | udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, |
990 | UDMA_CHAN_RT_CTL_EN); |
991 | |
992 | break; |
993 | default: |
994 | return -EINVAL; |
995 | } |
996 | |
997 | uc->state = UDMA_CHAN_IS_ACTIVE; |
998 | out: |
999 | |
1000 | return 0; |
1001 | } |
1002 | |
1003 | static int udma_stop(struct udma_chan *uc) |
1004 | { |
1005 | enum udma_chan_state old_state = uc->state; |
1006 | |
1007 | uc->state = UDMA_CHAN_IS_TERMINATING; |
	reinit_completion(&uc->teardown_completed);
1009 | |
1010 | switch (uc->config.dir) { |
1011 | case DMA_DEV_TO_MEM: |
1012 | if (!uc->cyclic && !uc->desc) |
			udma_push_to_ring(uc, -1);
1014 | |
1015 | udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, |
1016 | UDMA_PEER_RT_EN_ENABLE | |
1017 | UDMA_PEER_RT_EN_TEARDOWN); |
1018 | break; |
1019 | case DMA_MEM_TO_DEV: |
1020 | udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, |
1021 | UDMA_PEER_RT_EN_ENABLE | |
1022 | UDMA_PEER_RT_EN_FLUSH); |
1023 | udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, |
1024 | UDMA_CHAN_RT_CTL_EN | |
1025 | UDMA_CHAN_RT_CTL_TDOWN); |
1026 | break; |
1027 | case DMA_MEM_TO_MEM: |
1028 | udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, |
1029 | UDMA_CHAN_RT_CTL_EN | |
1030 | UDMA_CHAN_RT_CTL_TDOWN); |
1031 | break; |
1032 | default: |
1033 | uc->state = old_state; |
1034 | complete_all(&uc->teardown_completed); |
1035 | return -EINVAL; |
1036 | } |
1037 | |
1038 | return 0; |
1039 | } |
1040 | |
1041 | static void udma_cyclic_packet_elapsed(struct udma_chan *uc) |
1042 | { |
1043 | struct udma_desc *d = uc->desc; |
1044 | struct cppi5_host_desc_t *h_desc; |
1045 | |
1046 | h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; |
	cppi5_hdesc_reset_to_original(h_desc);
	udma_push_to_ring(uc, d->desc_idx);
1049 | d->desc_idx = (d->desc_idx + 1) % d->sglen; |
1050 | } |
1051 | |
1052 | static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d) |
1053 | { |
1054 | struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; |
1055 | |
1056 | memcpy(d->metadata, h_desc->epib, d->metadata_size); |
1057 | } |
1058 | |
1059 | static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) |
1060 | { |
1061 | u32 peer_bcnt, bcnt; |
1062 | |
1063 | /* |
1064 | * Only TX towards PDMA is affected. |
	 * If DMA_PREP_INTERRUPT is not set by the consumer then skip the
	 * transfer completion calculation; the consumer must ensure that
	 * there is no stale data in the DMA fabric in this case.
1068 | */ |
1069 | if (uc->config.ep_type == PSIL_EP_NATIVE || |
1070 | uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT)) |
1071 | return true; |
1072 | |
1073 | peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); |
1074 | bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); |
1075 | |
1076 | /* Transfer is incomplete, store current residue and time stamp */ |
1077 | if (peer_bcnt < bcnt) { |
1078 | uc->tx_drain.residue = bcnt - peer_bcnt; |
1079 | uc->tx_drain.tstamp = ktime_get(); |
1080 | return false; |
1081 | } |
1082 | |
1083 | return true; |
1084 | } |
1085 | |
1086 | static void udma_check_tx_completion(struct work_struct *work) |
1087 | { |
1088 | struct udma_chan *uc = container_of(work, typeof(*uc), |
1089 | tx_drain.work.work); |
1090 | bool desc_done = true; |
1091 | u32 residue_diff; |
1092 | ktime_t time_diff; |
1093 | unsigned long delay; |
1094 | |
1095 | while (1) { |
1096 | if (uc->desc) { |
1097 | /* Get previous residue and time stamp */ |
1098 | residue_diff = uc->tx_drain.residue; |
1099 | time_diff = uc->tx_drain.tstamp; |
1100 | /* |
1101 | * Get current residue and time stamp or see if |
1102 | * transfer is complete |
1103 | */ |
			desc_done = udma_is_desc_really_done(uc, uc->desc);
1105 | } |
1106 | |
1107 | if (!desc_done) { |
1108 | /* |
1109 | * Find the time delta and residue delta w.r.t |
1110 | * previous poll |
1111 | */ |
1112 | time_diff = ktime_sub(uc->tx_drain.tstamp, |
1113 | time_diff) + 1; |
1114 | residue_diff -= uc->tx_drain.residue; |
1115 | if (residue_diff) { |
1116 | /* |
1117 | * Try to guess when we should check |
1118 | * next time by calculating rate at |
1119 | * which data is being drained at the |
1120 | * peer device |
1121 | */ |
1122 | delay = (time_diff / residue_diff) * |
1123 | uc->tx_drain.residue; |
1124 | } else { |
1125 | /* No progress, check again in 1 second */ |
				schedule_delayed_work(&uc->tx_drain.work, HZ);
1127 | break; |
1128 | } |
1129 | |
			usleep_range(ktime_to_us(delay),
				     ktime_to_us(delay) + 10);
1132 | continue; |
1133 | } |
1134 | |
1135 | if (uc->desc) { |
1136 | struct udma_desc *d = uc->desc; |
1137 | |
			udma_decrement_byte_counters(uc, d->residue);
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
1141 | break; |
1142 | } |
1143 | |
1144 | break; |
1145 | } |
1146 | } |
1147 | |
1148 | static irqreturn_t udma_ring_irq_handler(int irq, void *data) |
1149 | { |
1150 | struct udma_chan *uc = data; |
1151 | struct udma_desc *d; |
1152 | dma_addr_t paddr = 0; |
1153 | |
	if (udma_pop_from_ring(uc, &paddr) || !paddr)
1155 | return IRQ_HANDLED; |
1156 | |
	spin_lock(&uc->vc.lock);
1158 | |
1159 | /* Teardown completion message */ |
1160 | if (cppi5_desc_is_tdcm(paddr)) { |
1161 | complete_all(&uc->teardown_completed); |
1162 | |
1163 | if (uc->terminated_desc) { |
			udma_desc_free(&uc->terminated_desc->vd);
1165 | uc->terminated_desc = NULL; |
1166 | } |
1167 | |
1168 | if (!uc->desc) |
1169 | udma_start(uc); |
1170 | |
1171 | goto out; |
1172 | } |
1173 | |
1174 | d = udma_udma_desc_from_paddr(uc, paddr); |
1175 | |
1176 | if (d) { |
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);
		if (desc_paddr != paddr) {
			dev_err(uc->ud->dev, "not matching descriptors!\n");
1181 | goto out; |
1182 | } |
1183 | |
1184 | if (d == uc->desc) { |
1185 | /* active descriptor */ |
1186 | if (uc->cyclic) { |
1187 | udma_cyclic_packet_elapsed(uc); |
				vchan_cyclic_callback(&d->vd);
1189 | } else { |
1190 | if (udma_is_desc_really_done(uc, d)) { |
					udma_decrement_byte_counters(uc, d->residue);
1192 | udma_start(uc); |
					vchan_cookie_complete(&d->vd);
1194 | } else { |
					schedule_delayed_work(&uc->tx_drain.work, 0);
1197 | } |
1198 | } |
1199 | } else { |
1200 | /* |
1201 | * terminated descriptor, mark the descriptor as |
1202 | * completed to update the channel's cookie marker |
1203 | */ |
			dma_cookie_complete(&d->vd.tx);
1205 | } |
1206 | } |
1207 | out: |
	spin_unlock(&uc->vc.lock);
1209 | |
1210 | return IRQ_HANDLED; |
1211 | } |
1212 | |
1213 | static irqreturn_t udma_udma_irq_handler(int irq, void *data) |
1214 | { |
1215 | struct udma_chan *uc = data; |
1216 | struct udma_desc *d; |
1217 | |
	spin_lock(&uc->vc.lock);
1219 | d = uc->desc; |
1220 | if (d) { |
1221 | d->tr_idx = (d->tr_idx + 1) % d->sglen; |
1222 | |
1223 | if (uc->cyclic) { |
			vchan_cyclic_callback(&d->vd);
1225 | } else { |
1226 | /* TODO: figure out the real amount of data */ |
			udma_decrement_byte_counters(uc, d->residue);
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
1230 | } |
1231 | } |
1232 | |
	spin_unlock(&uc->vc.lock);
1234 | |
1235 | return IRQ_HANDLED; |
1236 | } |
1237 | |
1238 | /** |
1239 | * __udma_alloc_gp_rflow_range - alloc range of GP RX flows |
1240 | * @ud: UDMA device |
1241 | * @from: Start the search from this flow id number |
1242 | * @cnt: Number of consecutive flow ids to allocate |
1243 | * |
 * Allocate a range of RX flow ids for future use; these flows can be requested
 * only by explicit flow id number. If @from is set to -1 it will try to find
 * the first free range. If @from is a positive value it will force allocation
 * only of the specified range of flows.
 *
 * Returns -ENOMEM if a free range can't be found,
 * -EEXIST if the requested range is busy,
 * -EINVAL if wrong input values are passed.
 * Returns the flow id on success.
1253 | */ |
1254 | static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt) |
1255 | { |
1256 | int start, tmp_from; |
1257 | DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS); |
1258 | |
1259 | tmp_from = from; |
1260 | if (tmp_from < 0) |
1261 | tmp_from = ud->rchan_cnt; |
	/* default flows can't be allocated and are accessible only by id */
1263 | if (tmp_from < ud->rchan_cnt) |
1264 | return -EINVAL; |
1265 | |
1266 | if (tmp_from + cnt > ud->rflow_cnt) |
1267 | return -EINVAL; |
1268 | |
	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
		  ud->rflow_cnt);

	start = bitmap_find_next_zero_area(tmp,
					   ud->rflow_cnt,
					   tmp_from, cnt, 0);
1275 | if (start >= ud->rflow_cnt) |
1276 | return -ENOMEM; |
1277 | |
1278 | if (from >= 0 && start != from) |
1279 | return -EEXIST; |
1280 | |
	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1282 | return start; |
1283 | } |
1284 | |
1285 | static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt) |
1286 | { |
1287 | if (from < ud->rchan_cnt) |
1288 | return -EINVAL; |
1289 | if (from + cnt > ud->rflow_cnt) |
1290 | return -EINVAL; |
1291 | |
	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1293 | return 0; |
1294 | } |
1295 | |
1296 | static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id) |
1297 | { |
1298 | /* |
1299 | * Attempt to request rflow by ID can be made for any rflow |
1300 | * if not in use with assumption that caller knows what's doing. |
1301 | * TI-SCI FW will perform additional permission check ant way, it's |
1302 | * safe |
1303 | */ |
1304 | |
1305 | if (id < 0 || id >= ud->rflow_cnt) |
		return ERR_PTR(-ENOENT);
1307 | |
1308 | if (test_bit(id, ud->rflow_in_use)) |
		return ERR_PTR(-ENOENT);
1310 | |
1311 | if (ud->rflow_gp_map) { |
1312 | /* GP rflow has to be allocated first */ |
1313 | if (!test_bit(id, ud->rflow_gp_map) && |
1314 | !test_bit(id, ud->rflow_gp_map_allocated)) |
			return ERR_PTR(-EINVAL);
1316 | } |
1317 | |
1318 | dev_dbg(ud->dev, "get rflow%d\n" , id); |
1319 | set_bit(nr: id, addr: ud->rflow_in_use); |
1320 | return &ud->rflows[id]; |
1321 | } |
1322 | |
1323 | static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow) |
1324 | { |
1325 | if (!test_bit(rflow->id, ud->rflow_in_use)) { |
1326 | dev_err(ud->dev, "attempt to put unused rflow%d\n" , rflow->id); |
1327 | return; |
1328 | } |
1329 | |
1330 | dev_dbg(ud->dev, "put rflow%d\n" , rflow->id); |
1331 | clear_bit(nr: rflow->id, addr: ud->rflow_in_use); |
1332 | } |
1333 | |
1334 | #define UDMA_RESERVE_RESOURCE(res) \ |
1335 | static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \ |
1336 | enum udma_tp_level tpl, \ |
1337 | int id) \ |
1338 | { \ |
1339 | if (id >= 0) { \ |
1340 | if (test_bit(id, ud->res##_map)) { \ |
1341 | dev_err(ud->dev, "res##%d is in use\n", id); \ |
1342 | return ERR_PTR(-ENOENT); \ |
1343 | } \ |
1344 | } else { \ |
1345 | int start; \ |
1346 | \ |
1347 | if (tpl >= ud->res##_tpl.levels) \ |
1348 | tpl = ud->res##_tpl.levels - 1; \ |
1349 | \ |
1350 | start = ud->res##_tpl.start_idx[tpl]; \ |
1351 | \ |
1352 | id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \ |
1353 | start); \ |
1354 | if (id == ud->res##_cnt) { \ |
1355 | return ERR_PTR(-ENOENT); \ |
1356 | } \ |
1357 | } \ |
1358 | \ |
1359 | set_bit(id, ud->res##_map); \ |
1360 | return &ud->res##s[id]; \ |
1361 | } |
1362 | |
1363 | UDMA_RESERVE_RESOURCE(bchan); |
1364 | UDMA_RESERVE_RESOURCE(tchan); |
1365 | UDMA_RESERVE_RESOURCE(rchan); |
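
/*
 * For each resource the macro above expands to a reservation helper of the
 * form (sketch for "tchan"):
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       enum udma_tp_level tpl,
 *						       int id);
 *
 * A non-negative @id requests that exact channel; a negative @id picks the
 * first free channel, searching upward from the start index of the
 * requested throughput level.
 */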
1366 | |
1367 | static int bcdma_get_bchan(struct udma_chan *uc) |
1368 | { |
1369 | struct udma_dev *ud = uc->ud; |
1370 | enum udma_tp_level tpl; |
1371 | int ret; |
1372 | |
1373 | if (uc->bchan) { |
1374 | dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n" , |
1375 | uc->id, uc->bchan->id); |
1376 | return 0; |
1377 | } |
1378 | |
1379 | /* |
1380 | * Use normal channels for peripherals, and highest TPL channel for |
1381 | * mem2mem |
1382 | */ |
1383 | if (uc->config.tr_trigger_type) |
1384 | tpl = 0; |
1385 | else |
1386 | tpl = ud->bchan_tpl.levels - 1; |
1387 | |
	uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
	if (IS_ERR(uc->bchan)) {
		ret = PTR_ERR(uc->bchan);
1391 | uc->bchan = NULL; |
1392 | return ret; |
1393 | } |
1394 | |
1395 | uc->tchan = uc->bchan; |
1396 | |
1397 | return 0; |
1398 | } |
1399 | |
1400 | static int udma_get_tchan(struct udma_chan *uc) |
1401 | { |
1402 | struct udma_dev *ud = uc->ud; |
1403 | int ret; |
1404 | |
1405 | if (uc->tchan) { |
1406 | dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n" , |
1407 | uc->id, uc->tchan->id); |
1408 | return 0; |
1409 | } |
1410 | |
1411 | /* |
1412 | * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. |
1413 | * For PKTDMA mapped channels it is configured to a channel which must |
1414 | * be used to service the peripheral. |
1415 | */ |
	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
					 uc->config.mapped_channel_id);
	if (IS_ERR(uc->tchan)) {
		ret = PTR_ERR(uc->tchan);
1420 | uc->tchan = NULL; |
1421 | return ret; |
1422 | } |
1423 | |
1424 | if (ud->tflow_cnt) { |
1425 | int tflow_id; |
1426 | |
		/* Only PKTDMA has support for tx flows */
1428 | if (uc->config.default_flow_id >= 0) |
1429 | tflow_id = uc->config.default_flow_id; |
1430 | else |
1431 | tflow_id = uc->tchan->id; |
1432 | |
1433 | if (test_bit(tflow_id, ud->tflow_map)) { |
1434 | dev_err(ud->dev, "tflow%d is in use\n" , tflow_id); |
1435 | clear_bit(nr: uc->tchan->id, addr: ud->tchan_map); |
1436 | uc->tchan = NULL; |
1437 | return -ENOENT; |
1438 | } |
1439 | |
1440 | uc->tchan->tflow_id = tflow_id; |
		set_bit(tflow_id, ud->tflow_map);
1442 | } else { |
1443 | uc->tchan->tflow_id = -1; |
1444 | } |
1445 | |
1446 | return 0; |
1447 | } |
1448 | |
1449 | static int udma_get_rchan(struct udma_chan *uc) |
1450 | { |
1451 | struct udma_dev *ud = uc->ud; |
1452 | int ret; |
1453 | |
1454 | if (uc->rchan) { |
1455 | dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n" , |
1456 | uc->id, uc->rchan->id); |
1457 | return 0; |
1458 | } |
1459 | |
1460 | /* |
1461 | * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. |
1462 | * For PKTDMA mapped channels it is configured to a channel which must |
1463 | * be used to service the peripheral. |
1464 | */ |
	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
					 uc->config.mapped_channel_id);
	if (IS_ERR(uc->rchan)) {
		ret = PTR_ERR(uc->rchan);
1469 | uc->rchan = NULL; |
1470 | return ret; |
1471 | } |
1472 | |
1473 | return 0; |
1474 | } |
1475 | |
1476 | static int udma_get_chan_pair(struct udma_chan *uc) |
1477 | { |
1478 | struct udma_dev *ud = uc->ud; |
1479 | int chan_id, end; |
1480 | |
1481 | if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { |
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1483 | uc->id, uc->tchan->id); |
1484 | return 0; |
1485 | } |
1486 | |
1487 | if (uc->tchan) { |
1488 | dev_err(ud->dev, "chan%d: already have tchan%d allocated\n" , |
1489 | uc->id, uc->tchan->id); |
1490 | return -EBUSY; |
1491 | } else if (uc->rchan) { |
1492 | dev_err(ud->dev, "chan%d: already have rchan%d allocated\n" , |
1493 | uc->id, uc->rchan->id); |
1494 | return -EBUSY; |
1495 | } |
1496 | |
1497 | /* Can be optimized, but let's have it like this for now */ |
1498 | end = min(ud->tchan_cnt, ud->rchan_cnt); |
1499 | /* |
1500 | * Try to use the highest TPL channel pair for MEM_TO_MEM channels |
1501 | * Note: in UDMAP the channel TPL is symmetric between tchan and rchan |
1502 | */ |
1503 | chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1]; |
1504 | for (; chan_id < end; chan_id++) { |
1505 | if (!test_bit(chan_id, ud->tchan_map) && |
1506 | !test_bit(chan_id, ud->rchan_map)) |
1507 | break; |
1508 | } |
1509 | |
1510 | if (chan_id == end) |
1511 | return -ENOENT; |
1512 | |
	set_bit(chan_id, ud->tchan_map);
	set_bit(chan_id, ud->rchan_map);
1515 | uc->tchan = &ud->tchans[chan_id]; |
1516 | uc->rchan = &ud->rchans[chan_id]; |
1517 | |
1518 | /* UDMA does not use tx flows */ |
1519 | uc->tchan->tflow_id = -1; |
1520 | |
1521 | return 0; |
1522 | } |
1523 | |
1524 | static int udma_get_rflow(struct udma_chan *uc, int flow_id) |
1525 | { |
1526 | struct udma_dev *ud = uc->ud; |
1527 | int ret; |
1528 | |
1529 | if (!uc->rchan) { |
1530 | dev_err(ud->dev, "chan%d: does not have rchan??\n" , uc->id); |
1531 | return -EINVAL; |
1532 | } |
1533 | |
1534 | if (uc->rflow) { |
1535 | dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n" , |
1536 | uc->id, uc->rflow->id); |
1537 | return 0; |
1538 | } |
1539 | |
	uc->rflow = __udma_get_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow)) {
		ret = PTR_ERR(uc->rflow);
1543 | uc->rflow = NULL; |
1544 | return ret; |
1545 | } |
1546 | |
1547 | return 0; |
1548 | } |
1549 | |
1550 | static void bcdma_put_bchan(struct udma_chan *uc) |
1551 | { |
1552 | struct udma_dev *ud = uc->ud; |
1553 | |
1554 | if (uc->bchan) { |
1555 | dev_dbg(ud->dev, "chan%d: put bchan%d\n" , uc->id, |
1556 | uc->bchan->id); |
1557 | clear_bit(nr: uc->bchan->id, addr: ud->bchan_map); |
1558 | uc->bchan = NULL; |
1559 | uc->tchan = NULL; |
1560 | } |
1561 | } |
1562 | |
1563 | static void udma_put_rchan(struct udma_chan *uc) |
1564 | { |
1565 | struct udma_dev *ud = uc->ud; |
1566 | |
1567 | if (uc->rchan) { |
1568 | dev_dbg(ud->dev, "chan%d: put rchan%d\n" , uc->id, |
1569 | uc->rchan->id); |
1570 | clear_bit(nr: uc->rchan->id, addr: ud->rchan_map); |
1571 | uc->rchan = NULL; |
1572 | } |
1573 | } |
1574 | |
1575 | static void udma_put_tchan(struct udma_chan *uc) |
1576 | { |
1577 | struct udma_dev *ud = uc->ud; |
1578 | |
1579 | if (uc->tchan) { |
1580 | dev_dbg(ud->dev, "chan%d: put tchan%d\n" , uc->id, |
1581 | uc->tchan->id); |
1582 | clear_bit(nr: uc->tchan->id, addr: ud->tchan_map); |
1583 | |
1584 | if (uc->tchan->tflow_id >= 0) |
1585 | clear_bit(nr: uc->tchan->tflow_id, addr: ud->tflow_map); |
1586 | |
1587 | uc->tchan = NULL; |
1588 | } |
1589 | } |
1590 | |
1591 | static void udma_put_rflow(struct udma_chan *uc) |
1592 | { |
1593 | struct udma_dev *ud = uc->ud; |
1594 | |
1595 | if (uc->rflow) { |
1596 | dev_dbg(ud->dev, "chan%d: put rflow%d\n" , uc->id, |
1597 | uc->rflow->id); |
1598 | __udma_put_rflow(ud, rflow: uc->rflow); |
1599 | uc->rflow = NULL; |
1600 | } |
1601 | } |
1602 | |
1603 | static void bcdma_free_bchan_resources(struct udma_chan *uc) |
1604 | { |
1605 | if (!uc->bchan) |
1606 | return; |
1607 | |
	k3_ringacc_ring_free(uc->bchan->tc_ring);
	k3_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->tc_ring = NULL;
	uc->bchan->t_ring = NULL;
	k3_configure_chan_coherency(&uc->vc.chan, 0);
1613 | |
1614 | bcdma_put_bchan(uc); |
1615 | } |
1616 | |
1617 | static int bcdma_alloc_bchan_resources(struct udma_chan *uc) |
1618 | { |
1619 | struct k3_ring_cfg ring_cfg; |
1620 | struct udma_dev *ud = uc->ud; |
1621 | int ret; |
1622 | |
1623 | ret = bcdma_get_bchan(uc); |
1624 | if (ret) |
1625 | return ret; |
1626 | |
	ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
					    &uc->bchan->t_ring,
					    &uc->bchan->tc_ring);
1630 | if (ret) { |
1631 | ret = -EBUSY; |
1632 | goto err_ring; |
1633 | } |
1634 | |
1635 | memset(&ring_cfg, 0, sizeof(ring_cfg)); |
1636 | ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; |
1637 | ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; |
1638 | ring_cfg.mode = K3_RINGACC_RING_MODE_RING; |
1639 | |
	k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
	ring_cfg.asel = ud->asel;
	ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);

	ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
1645 | if (ret) |
1646 | goto err_ringcfg; |
1647 | |
1648 | return 0; |
1649 | |
1650 | err_ringcfg: |
	k3_ringacc_ring_free(uc->bchan->tc_ring);
	uc->bchan->tc_ring = NULL;
	k3_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->t_ring = NULL;
	k3_configure_chan_coherency(&uc->vc.chan, 0);
1656 | err_ring: |
1657 | bcdma_put_bchan(uc); |
1658 | |
1659 | return ret; |
1660 | } |
1661 | |
1662 | static void udma_free_tx_resources(struct udma_chan *uc) |
1663 | { |
1664 | if (!uc->tchan) |
1665 | return; |
1666 | |
	k3_ringacc_ring_free(uc->tchan->t_ring);
	k3_ringacc_ring_free(uc->tchan->tc_ring);
1669 | uc->tchan->t_ring = NULL; |
1670 | uc->tchan->tc_ring = NULL; |
1671 | |
1672 | udma_put_tchan(uc); |
1673 | } |
1674 | |
1675 | static int udma_alloc_tx_resources(struct udma_chan *uc) |
1676 | { |
1677 | struct k3_ring_cfg ring_cfg; |
1678 | struct udma_dev *ud = uc->ud; |
1679 | struct udma_tchan *tchan; |
1680 | int ring_idx, ret; |
1681 | |
1682 | ret = udma_get_tchan(uc); |
1683 | if (ret) |
1684 | return ret; |
1685 | |
1686 | tchan = uc->tchan; |
1687 | if (tchan->tflow_id >= 0) |
1688 | ring_idx = tchan->tflow_id; |
1689 | else |
1690 | ring_idx = ud->bchan_cnt + tchan->id; |
1691 | |
	ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
					    &tchan->t_ring,
					    &tchan->tc_ring);
1695 | if (ret) { |
1696 | ret = -EBUSY; |
1697 | goto err_ring; |
1698 | } |
1699 | |
1700 | memset(&ring_cfg, 0, sizeof(ring_cfg)); |
1701 | ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; |
1702 | ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; |
1703 | if (ud->match_data->type == DMA_TYPE_UDMA) { |
1704 | ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; |
1705 | } else { |
1706 | ring_cfg.mode = K3_RINGACC_RING_MODE_RING; |
1707 | |
		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
		ring_cfg.asel = uc->config.asel;
		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1711 | } |
1712 | |
	ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
	ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
1715 | |
1716 | if (ret) |
1717 | goto err_ringcfg; |
1718 | |
1719 | return 0; |
1720 | |
1721 | err_ringcfg: |
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
	k3_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
1726 | err_ring: |
1727 | udma_put_tchan(uc); |
1728 | |
1729 | return ret; |
1730 | } |
1731 | |
1732 | static void udma_free_rx_resources(struct udma_chan *uc) |
1733 | { |
1734 | if (!uc->rchan) |
1735 | return; |
1736 | |
1737 | if (uc->rflow) { |
1738 | struct udma_rflow *rflow = uc->rflow; |
1739 | |
		k3_ringacc_ring_free(rflow->fd_ring);
		k3_ringacc_ring_free(rflow->r_ring);
1742 | rflow->fd_ring = NULL; |
1743 | rflow->r_ring = NULL; |
1744 | |
1745 | udma_put_rflow(uc); |
1746 | } |
1747 | |
1748 | udma_put_rchan(uc); |
1749 | } |
1750 | |
1751 | static int udma_alloc_rx_resources(struct udma_chan *uc) |
1752 | { |
1753 | struct udma_dev *ud = uc->ud; |
1754 | struct k3_ring_cfg ring_cfg; |
1755 | struct udma_rflow *rflow; |
1756 | int fd_ring_id; |
1757 | int ret; |
1758 | |
1759 | ret = udma_get_rchan(uc); |
1760 | if (ret) |
1761 | return ret; |
1762 | |
1763 | /* For MEM_TO_MEM we don't need rflow or rings */ |
1764 | if (uc->config.dir == DMA_MEM_TO_MEM) |
1765 | return 0; |
1766 | |
1767 | if (uc->config.default_flow_id >= 0) |
		ret = udma_get_rflow(uc, uc->config.default_flow_id);
	else
		ret = udma_get_rflow(uc, uc->rchan->id);
1771 | |
1772 | if (ret) { |
1773 | ret = -EBUSY; |
1774 | goto err_rflow; |
1775 | } |
1776 | |
1777 | rflow = uc->rflow; |
1778 | if (ud->tflow_cnt) |
1779 | fd_ring_id = ud->tflow_cnt + rflow->id; |
1780 | else |
1781 | fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt + |
1782 | uc->rchan->id; |
1783 | |
	ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
					    &rflow->fd_ring, &rflow->r_ring);
1786 | if (ret) { |
1787 | ret = -EBUSY; |
1788 | goto err_ring; |
1789 | } |
1790 | |
1791 | memset(&ring_cfg, 0, sizeof(ring_cfg)); |
1792 | |
1793 | ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; |
1794 | if (ud->match_data->type == DMA_TYPE_UDMA) { |
1795 | if (uc->config.pkt_mode) |
1796 | ring_cfg.size = SG_MAX_SEGMENTS; |
1797 | else |
1798 | ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; |
1799 | |
1800 | ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; |
1801 | } else { |
1802 | ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; |
1803 | ring_cfg.mode = K3_RINGACC_RING_MODE_RING; |
1804 | |
		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
		ring_cfg.asel = uc->config.asel;
		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1808 | } |
1809 | |
	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);

	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1814 | |
1815 | if (ret) |
1816 | goto err_ringcfg; |
1817 | |
1818 | return 0; |
1819 | |
1820 | err_ringcfg: |
	k3_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
	k3_ringacc_ring_free(rflow->fd_ring);
1824 | rflow->fd_ring = NULL; |
1825 | err_ring: |
1826 | udma_put_rflow(uc); |
1827 | err_rflow: |
1828 | udma_put_rchan(uc); |
1829 | |
1830 | return ret; |
1831 | } |
1832 | |
#define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)

#define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)

#define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)

#define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

#define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

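/*
 * The valid_params bitmask tells the TISCI resource-management firmware
 * which fields of a channel-config request to act on; fields whose VALID
 * bit is clear keep their current/default setting. A minimal sketch of
 * the pattern used throughout this file:
 *
 *	struct ti_sci_msg_rm_udmap_tx_ch_cfg req = { 0 };
 *
 *	req.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
 *	req.nav_id = tisci_rm->tisci_dev_id;
 *	req.index = tchan->id;
 *	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req);
 */
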
static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct udma_rchan *rchan = uc->rchan;
	u8 burst_size = 0;
	int ret;
	u8 tpl;

	/* Non-synchronized - mem to mem type of transfer */
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };

	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);

		burst_size = ud->match_data->burst_size[tpl];
	}

	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = ud->atype;
	if (burst_size) {
		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_tx.tx_burst_size = burst_size;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret) {
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
		return ret;
	}

	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_rx.rxcq_qnum = tc_ring;
	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_rx.rx_atype = ud->atype;
	if (burst_size) {
		req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_rx.rx_burst_size = burst_size;
	}

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret)
		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);

	return ret;
}

static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct udma_bchan *bchan = uc->bchan;
	u8 burst_size = 0;
	int ret;
	u8 tpl;

	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);

		burst_size = ud->match_data->burst_size[tpl];
	}

	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
	req_tx.index = bchan->id;
	if (burst_size) {
		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_tx.tx_burst_size = burst_size;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);

	return ret;
}

static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	u32 mode, fetch_size;
	int ret;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = mode;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	req_tx.tx_fetch_size = fetch_size >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = uc->config.atype;
	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}

static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	int ret;

	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}

#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config

static int udma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_rchan *rchan = uc->rchan;
	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	u32 mode, fetch_size;
	int ret;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = fetch_size >> 2;
	req_rx.rxcq_qnum = rx_ring;
	req_rx.rx_chan_type = mode;
	req_rx.rx_atype = uc->config.atype;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = rchan->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 1;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);

	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);

	return ret;
}

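/*
 * Note the default flow mapping above: for UDMA the flow shares the rchan's
 * index (flow_req.flow_index = rchan->id), and all four free-descriptor
 * queue selectors of the flow point at the single fd_ring, so the flow
 * always pulls free descriptors from one ring regardless of packet size.
 */
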
static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_rchan *rchan = uc->rchan;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	int ret;

	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret)
		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);

	return ret;
}

static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	int ret;

	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = uc->rchan->id;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 1;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);

	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
			ret);

	return ret;
}

static int udma_alloc_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
	const struct udma_soc_data *soc_data = ud->soc_data;
	struct k3_ring *irq_ring;
	u32 irq_udma_idx;
	int ret;

	uc->dma_dev = ud->dev;

	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
		uc->use_dma_pool = true;
		/* in case of MEM_TO_MEM we have maximum of two TRs */
		if (uc->config.dir == DMA_MEM_TO_MEM) {
			uc->config.hdesc_size = cppi5_trdesc_calc_size(
					sizeof(struct cppi5_tr_type15_t), 2);
			uc->config.pkt_mode = false;
		}
	}

	if (uc->use_dma_pool) {
		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
						 uc->config.hdesc_size,
						 ud->desc_align,
						 0);
		if (!uc->hdesc_pool) {
			dev_err(ud->ddev.dev,
				"Descriptor pool allocation failed\n");
			uc->use_dma_pool = false;
			ret = -ENOMEM;
			goto err_cleanup;
		}
	}

	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
	reinit_completion(&uc->teardown_completed);
	complete_all(&uc->teardown_completed);
	uc->state = UDMA_CHAN_IS_IDLE;

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non-synchronized - mem to mem type of transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
			uc->id);

		ret = udma_get_chan_pair(uc);
		if (ret)
			goto err_cleanup;

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			udma_put_rchan(uc);
			goto err_cleanup;
		}

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			udma_free_tx_resources(uc);
			goto err_cleanup;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->tchan->tc_ring;
		irq_udma_idx = uc->tchan->id;

		ret = udma_tisci_m2m_channel_config(uc);
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_cleanup;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->tchan->tc_ring;
		irq_udma_idx = uc->tchan->id;

		ret = udma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_cleanup;

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->rflow->r_ring;
		irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;

		ret = udma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Cannot happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		ret = -EINVAL;
		goto err_cleanup;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_reset_chan(uc, false);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	/* PSI-L pairing */
	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	uc->psil_paired = true;

	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			k3_ringacc_get_ring_id(irq_ring));
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	/* Event from UDMA (TR events) only needed for slave TR mode channels */
	if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
		uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
		if (uc->irq_num_udma <= 0) {
			dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
				irq_udma_idx);
			free_irq(uc->irq_num_ring, uc);
			ret = -EINVAL;
			goto err_irq_free;
		}

		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
				  uc->name, uc);
		if (ret) {
			dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
				uc->id);
			free_irq(uc->irq_num_ring, uc);
			goto err_irq_free;
		}
	} else {
		uc->irq_num_udma = 0;
	}

	udma_reset_rings(uc);

	return 0;

err_irq_free:
	uc->irq_num_ring = 0;
	uc->irq_num_udma = 0;
err_psi_free:
	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
	uc->psil_paired = false;
err_res_free:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
err_cleanup:
	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}

	return ret;
}

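/*
 * Note on the PSI-L thread numbering used above: source (output) threads
 * are numbered from the DMA instance's psil_base, and destination (input)
 * threads carry K3_PSIL_DST_THREAD_ID_OFFSET (bit 15) set. Illustrative
 * example, assuming psil_base = 0x1000: tchan 2 pairs its source thread
 * 0x1002 with a peripheral destination thread such as 0x8100, while
 * rchan 2 is reached as destination thread 0x1002 | 0x8000 = 0x9002.
 */
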
static int bcdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 irq_udma_idx, irq_ring_idx;
	int ret;

	/* Only TR mode is supported */
	uc->config.pkt_mode = false;

	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
	reinit_completion(&uc->teardown_completed);
	complete_all(&uc->teardown_completed);
	uc->state = UDMA_CHAN_IS_IDLE;

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non-synchronized - mem to mem type of transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
			uc->id);

		ret = bcdma_alloc_bchan_resources(uc);
		if (ret)
			return ret;

		irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
		irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;

		ret = bcdma_tisci_m2m_channel_config(uc);
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
		irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;

		ret = bcdma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
		irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;

		ret = bcdma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Cannot happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_reset_chan(uc, false);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	uc->dma_dev = dmaengine_get_dma_device(chan);
	if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
		uc->config.hdesc_size = cppi5_trdesc_calc_size(
					sizeof(struct cppi5_tr_type15_t), 2);

		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
						 uc->config.hdesc_size,
						 ud->desc_align,
						 0);
		if (!uc->hdesc_pool) {
			dev_err(ud->ddev.dev,
				"Descriptor pool allocation failed\n");
			uc->use_dma_pool = false;
			ret = -ENOMEM;
			goto err_res_free;
		}

		uc->use_dma_pool = true;
	} else if (uc->config.dir != DMA_MEM_TO_MEM) {
		/* PSI-L pairing */
		ret = navss_psil_pair(ud, uc->config.src_thread,
				      uc->config.dst_thread);
		if (ret) {
			dev_err(ud->dev,
				"PSI-L pairing failed: 0x%04x -> 0x%04x\n",
				uc->config.src_thread, uc->config.dst_thread);
			goto err_res_free;
		}

		uc->psil_paired = true;
	}

	uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			irq_ring_idx);
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	/* Event from BCDMA (TR events) only needed for slave channels */
	if (is_slave_direction(uc->config.dir)) {
		uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
		if (uc->irq_num_udma <= 0) {
			dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
				irq_udma_idx);
			free_irq(uc->irq_num_ring, uc);
			ret = -EINVAL;
			goto err_irq_free;
		}

		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
				  uc->name, uc);
		if (ret) {
			dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
				uc->id);
			free_irq(uc->irq_num_ring, uc);
			goto err_irq_free;
		}
	} else {
		uc->irq_num_udma = 0;
	}

	udma_reset_rings(uc);

	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
				  udma_check_tx_completion);
	return 0;

err_irq_free:
	uc->irq_num_ring = 0;
	uc->irq_num_udma = 0;
err_psi_free:
	if (uc->psil_paired)
		navss_psil_unpair(ud, uc->config.src_thread,
				  uc->config.dst_thread);
	uc->psil_paired = false;
err_res_free:
	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}

	return ret;
}

static int bcdma_router_config(struct dma_chan *chan)
{
	struct k3_event_route_data *router_data = chan->route_data;
	struct udma_chan *uc = to_udma_chan(chan);
	u32 trigger_event;

	if (!uc->bchan)
		return -EINVAL;

	if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
		return -EINVAL;

	trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
	trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;

	return router_data->set_event(router_data->priv, trigger_event);
}

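/*
 * Each bchan owns two consecutive global trigger events, one per trigger
 * type. Worked example with an assumed (not SoC-accurate)
 * bcdma_trigger_event_offset of 0xc400: bchan 3 with tr_trigger_type 2
 * maps to event 0xc400 + (3 * 2) + 2 - 1 = 0xc407.
 */
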
static int pktdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 irq_ring_idx;
	int ret;

	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
	reinit_completion(&uc->teardown_completed);
	complete_all(&uc->teardown_completed);
	uc->state = UDMA_CHAN_IS_IDLE;

	switch (uc->config.dir) {
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;

		ret = pktdma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;

		ret = pktdma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Cannot happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_reset_chan(uc, false);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	uc->dma_dev = dmaengine_get_dma_device(chan);
	uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
					 uc->config.hdesc_size, ud->desc_align,
					 0);
	if (!uc->hdesc_pool) {
		dev_err(ud->ddev.dev,
			"Descriptor pool allocation failed\n");
		uc->use_dma_pool = false;
		ret = -ENOMEM;
		goto err_res_free;
	}

	uc->use_dma_pool = true;

	/* PSI-L pairing */
	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	uc->psil_paired = true;

	uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			irq_ring_idx);
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	uc->irq_num_udma = 0;

	udma_reset_rings(uc);

	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
				  udma_check_tx_completion);

	if (uc->tchan)
		dev_dbg(ud->dev,
			"chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
			uc->id, uc->tchan->id, uc->tchan->tflow_id,
			uc->config.remote_thread_id);
	else if (uc->rchan)
		dev_dbg(ud->dev,
			"chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
			uc->id, uc->rchan->id, uc->rflow->id,
			uc->config.remote_thread_id);
	return 0;

err_irq_free:
	uc->irq_num_ring = 0;
err_psi_free:
	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
	uc->psil_paired = false;
err_res_free:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	dma_pool_destroy(uc->hdesc_pool);
	uc->use_dma_pool = false;

	return ret;
}

static int udma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct udma_chan *uc = to_udma_chan(chan);

	memcpy(&uc->cfg, cfg, sizeof(uc->cfg));

	return 0;
}

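/*
 * The stored configuration is only consumed later by the prep callbacks.
 * A minimal client-side sketch (illustrative; the address and widths are
 * placeholders for the peripheral's values):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = periph_rx_fifo_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */
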
static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
					    size_t tr_size, int tr_count,
					    enum dma_transfer_direction dir)
{
	struct udma_hwdesc *hwdesc;
	struct cppi5_desc_hdr_t *tr_desc;
	struct udma_desc *d;
	u32 reload_count = 0;
	u32 ring_id;

	switch (tr_size) {
	case 16:
	case 32:
	case 64:
	case 128:
		break;
	default:
		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
		return NULL;
	}

	/* We have only one descriptor containing multiple TRs */
	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = tr_count;

	d->hwdesc_count = 1;
	hwdesc = &d->hwdesc[0];

	/* Allocate memory for DMA ring descriptor */
	if (uc->use_dma_pool) {
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
							   GFP_NOWAIT,
							   &hwdesc->cppi5_desc_paddr);
	} else {
		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
								 tr_count);
		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
						uc->ud->desc_align);
		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
							      hwdesc->cppi5_desc_size,
							      &hwdesc->cppi5_desc_paddr,
							      GFP_NOWAIT);
	}

	if (!hwdesc->cppi5_desc_vaddr) {
		kfree(d);
		return NULL;
	}

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;

	tr_desc = hwdesc->cppi5_desc_vaddr;

	if (uc->cyclic)
		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;

	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
	cppi5_desc_set_pktids(tr_desc, uc->id,
			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);

	return d;
}

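/*
 * Resulting layout of the single TR descriptor allocated above (a sketch;
 * actual sizes depend on tr_size and tr_count):
 *
 *	+-----------------------------+ cppi5_desc_vaddr
 *	| CPPI5 TR descriptor header  | (padded to tr_size)
 *	+-----------------------------+ tr_req_base
 *	| TR request records [0..n)   | tr_size * tr_count bytes
 *	+-----------------------------+ tr_resp_base
 *	| TR response records         |
 *	+-----------------------------+
 */
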
/**
 * udma_get_tr_counters - calculate TR counters for a given length
 * @len: Length of the transfer
 * @align_to: Preferred alignment
 * @tr0_cnt0: First TR icnt0
 * @tr0_cnt1: First TR icnt1
 * @tr1_cnt0: Second (if used) TR icnt0
 *
 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated.
 * For len >= SZ_64K two TRs are used in a simple way:
 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
 * Second TR: the remaining length (tr1_cnt0)
 *
 * Returns the number of TRs the length needs (1 or 2), or
 * -EINVAL if the length cannot be supported.
 */
static int udma_get_tr_counters(size_t len, unsigned long align_to,
				u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
{
	if (len < SZ_64K) {
		*tr0_cnt0 = len;
		*tr0_cnt1 = 1;

		return 1;
	}

	if (align_to > 3)
		align_to = 3;

realign:
	*tr0_cnt0 = SZ_64K - BIT(align_to);
	if (len / *tr0_cnt0 >= SZ_64K) {
		if (align_to) {
			align_to--;
			goto realign;
		}
		return -EINVAL;
	}

	*tr0_cnt1 = len / *tr0_cnt0;
	*tr1_cnt0 = len % *tr0_cnt0;

	return 2;
}

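/*
 * Worked example for the math above: len = SZ_1M (1048576) with an 8-byte
 * aligned address (align_to capped at 3) gives
 * tr0_cnt0 = 65536 - 8 = 65528, tr0_cnt1 = 1048576 / 65528 = 16 and
 * tr1_cnt0 = 1048576 % 65528 = 128, i.e. the first TR moves 16 blocks of
 * 65528 bytes and the second TR moves the remaining 128 bytes.
 */
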
static struct udma_desc *
udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
		      unsigned int sglen, enum dma_transfer_direction dir,
		      unsigned long tx_flags, void *context)
{
	struct scatterlist *sgent;
	struct udma_desc *d;
	struct cppi5_tr_type1_t *tr_req = NULL;
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned int i;
	size_t tr_size;
	int num_tr = 0;
	int tr_idx = 0;
	u64 asel;

	/* estimate the number of TRs we will need */
	for_each_sg(sgl, sgent, sglen, i) {
		if (sg_dma_len(sgent) < SZ_64K)
			num_tr++;
		else
			num_tr += 2;
	}

	/* Now allocate and setup the descriptor. */
	tr_size = sizeof(struct cppi5_tr_type1_t);
	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
	if (!d)
		return NULL;

	d->sglen = sglen;

	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
		asel = 0;
	else
		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;

	tr_req = d->hwdesc[0].tr_req_base;
	for_each_sg(sgl, sgent, sglen, i) {
		dma_addr_t sg_addr = sg_dma_address(sgent);

		num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
					      &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
		if (num_tr < 0) {
			dev_err(uc->ud->dev, "size %u is not supported\n",
				sg_dma_len(sgent));
			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);

		sg_addr |= asel;
		tr_req[tr_idx].addr = sg_addr;
		tr_req[tr_idx].icnt0 = tr0_cnt0;
		tr_req[tr_idx].icnt1 = tr0_cnt1;
		tr_req[tr_idx].dim1 = tr0_cnt0;
		tr_idx++;

		if (num_tr == 2) {
			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
				      false, false,
				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);

			tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
			tr_req[tr_idx].icnt0 = tr1_cnt0;
			tr_req[tr_idx].icnt1 = 1;
			tr_req[tr_idx].dim1 = tr1_cnt0;
			tr_idx++;
		}

		d->residue += sg_dma_len(sgent);
	}

	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);

	return d;
}

static struct udma_desc *
udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
				unsigned int sglen,
				enum dma_transfer_direction dir,
				unsigned long tx_flags, void *context)
{
	struct scatterlist *sgent;
	struct cppi5_tr_type15_t *tr_req = NULL;
	enum dma_slave_buswidth dev_width;
	u32 csf = CPPI5_TR_CSF_SUPR_EVT;
	u16 tr_cnt0, tr_cnt1;
	dma_addr_t dev_addr;
	struct udma_desc *d;
	unsigned int i;
	size_t tr_size, sg_len;
	int num_tr = 0;
	int tr_idx = 0;
	u32 burst, trigger_size, port_window;
	u64 asel;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = uc->cfg.src_addr;
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
		port_window = uc->cfg.src_port_window_size;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = uc->cfg.dst_addr;
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
		port_window = uc->cfg.dst_port_window_size;
	} else {
		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	if (port_window) {
		if (port_window != burst) {
			dev_err(uc->ud->dev,
				"The burst must be equal to port_window\n");
			return NULL;
		}

		tr_cnt0 = dev_width * port_window;
		tr_cnt1 = 1;
	} else {
		tr_cnt0 = dev_width;
		tr_cnt1 = burst;
	}
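	/*
	 * One trigger fires one icnt2 decrement, i.e. moves
	 * tr_cnt0 * tr_cnt1 bytes. Illustrative example: a 4-byte bus width
	 * with a burst of 8 gives a trigger_size of 32 bytes per trigger.
	 */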
	trigger_size = tr_cnt0 * tr_cnt1;

	/* estimate the number of TRs we will need */
	for_each_sg(sgl, sgent, sglen, i) {
		sg_len = sg_dma_len(sgent);

		if (sg_len % trigger_size) {
			dev_err(uc->ud->dev,
				"Not aligned SG entry (%zu for %u)\n", sg_len,
				trigger_size);
			return NULL;
		}

		if (sg_len / trigger_size < SZ_64K)
			num_tr++;
		else
			num_tr += 2;
	}

	/* Now allocate and setup the descriptor. */
	tr_size = sizeof(struct cppi5_tr_type15_t);
	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
	if (!d)
		return NULL;

	d->sglen = sglen;

	if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
		asel = 0;
		csf |= CPPI5_TR_CSF_EOL_ICNT0;
	} else {
		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
		dev_addr |= asel;
	}

	tr_req = d->hwdesc[0].tr_req_base;
	for_each_sg(sgl, sgent, sglen, i) {
		u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
		dma_addr_t sg_addr = sg_dma_address(sgent);

		sg_len = sg_dma_len(sgent);
		num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
					      &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
		if (num_tr < 0) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				sg_len);
			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
			      true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
		cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
				     uc->config.tr_trigger_type,
				     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);

		sg_addr |= asel;
		if (dir == DMA_DEV_TO_MEM) {
			tr_req[tr_idx].addr = dev_addr;
			tr_req[tr_idx].icnt0 = tr_cnt0;
			tr_req[tr_idx].icnt1 = tr_cnt1;
			tr_req[tr_idx].icnt2 = tr0_cnt2;
			tr_req[tr_idx].icnt3 = tr0_cnt3;
			tr_req[tr_idx].dim1 = (-1) * tr_cnt0;

			tr_req[tr_idx].daddr = sg_addr;
			tr_req[tr_idx].dicnt0 = tr_cnt0;
			tr_req[tr_idx].dicnt1 = tr_cnt1;
			tr_req[tr_idx].dicnt2 = tr0_cnt2;
			tr_req[tr_idx].dicnt3 = tr0_cnt3;
			tr_req[tr_idx].ddim1 = tr_cnt0;
			tr_req[tr_idx].ddim2 = trigger_size;
			tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
		} else {
			tr_req[tr_idx].addr = sg_addr;
			tr_req[tr_idx].icnt0 = tr_cnt0;
			tr_req[tr_idx].icnt1 = tr_cnt1;
			tr_req[tr_idx].icnt2 = tr0_cnt2;
			tr_req[tr_idx].icnt3 = tr0_cnt3;
			tr_req[tr_idx].dim1 = tr_cnt0;
			tr_req[tr_idx].dim2 = trigger_size;
			tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;

			tr_req[tr_idx].daddr = dev_addr;
			tr_req[tr_idx].dicnt0 = tr_cnt0;
			tr_req[tr_idx].dicnt1 = tr_cnt1;
			tr_req[tr_idx].dicnt2 = tr0_cnt2;
			tr_req[tr_idx].dicnt3 = tr0_cnt3;
			tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
		}

		tr_idx++;

		if (num_tr == 2) {
			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
				      false, true,
				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
			cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
			cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
					     uc->config.tr_trigger_type,
					     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
					     0, 0);

			sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
			if (dir == DMA_DEV_TO_MEM) {
				tr_req[tr_idx].addr = dev_addr;
				tr_req[tr_idx].icnt0 = tr_cnt0;
				tr_req[tr_idx].icnt1 = tr_cnt1;
				tr_req[tr_idx].icnt2 = tr1_cnt2;
				tr_req[tr_idx].icnt3 = 1;
				tr_req[tr_idx].dim1 = (-1) * tr_cnt0;

				tr_req[tr_idx].daddr = sg_addr;
				tr_req[tr_idx].dicnt0 = tr_cnt0;
				tr_req[tr_idx].dicnt1 = tr_cnt1;
				tr_req[tr_idx].dicnt2 = tr1_cnt2;
				tr_req[tr_idx].dicnt3 = 1;
				tr_req[tr_idx].ddim1 = tr_cnt0;
				tr_req[tr_idx].ddim2 = trigger_size;
			} else {
				tr_req[tr_idx].addr = sg_addr;
				tr_req[tr_idx].icnt0 = tr_cnt0;
				tr_req[tr_idx].icnt1 = tr_cnt1;
				tr_req[tr_idx].icnt2 = tr1_cnt2;
				tr_req[tr_idx].icnt3 = 1;
				tr_req[tr_idx].dim1 = tr_cnt0;
				tr_req[tr_idx].dim2 = trigger_size;

				tr_req[tr_idx].daddr = dev_addr;
				tr_req[tr_idx].dicnt0 = tr_cnt0;
				tr_req[tr_idx].dicnt1 = tr_cnt1;
				tr_req[tr_idx].dicnt2 = tr1_cnt2;
				tr_req[tr_idx].dicnt3 = 1;
				tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
			}
			tr_idx++;
		}

		d->residue += sg_len;
	}

	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, csf | CPPI5_TR_CSF_EOP);

	return d;
}

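/*
 * Addressing convention in the triggered TRs above: the device side keeps
 * a fixed (FIFO-like) window, so its dim1 is negative (-tr_cnt0) to step
 * back to the window start after each inner loop, while the memory side
 * advances linearly through the buffer via trigger_size-based dim2/dim3
 * strides.
 */
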
static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
				   enum dma_slave_buswidth dev_width,
				   u16 elcnt)
{
	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
		return 0;

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		d->static_tr.elsize = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		d->static_tr.elsize = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_3_BYTES:
		d->static_tr.elsize = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		d->static_tr.elsize = 3;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		d->static_tr.elsize = 4;
		break;
	default: /* not reached */
		return -EINVAL;
	}

	d->static_tr.elcnt = elcnt;

	/*
	 * PDMA must close the packet when the channel is in packet mode.
	 * For TR mode when the channel is not cyclic we also need PDMA to
	 * close the packet, otherwise the transfer will stall because PDMA
	 * holds on to the data it has received from the peripheral.
	 */
	if (uc->config.pkt_mode || !uc->cyclic) {
		unsigned int div = dev_width * elcnt;

		if (uc->cyclic)
			d->static_tr.bstcnt = d->residue / d->sglen / div;
		else
			d->static_tr.bstcnt = d->residue / div;

		if (uc->config.dir == DMA_DEV_TO_MEM &&
		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
			return -EINVAL;
	} else {
		d->static_tr.bstcnt = 0;
	}

	return 0;
}

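/*
 * Worked example for the burst count above: a non-cyclic DEV_TO_MEM
 * transfer of d->residue = 4096 bytes on a 2-byte wide PDMA with elcnt = 4
 * gives div = 8 and bstcnt = 4096 / 8 = 512, which must fit within the
 * SoC's statictr_z_mask.
 */
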
static struct udma_desc *
udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long tx_flags, void *context)
{
	struct scatterlist *sgent;
	struct cppi5_host_desc_t *h_desc = NULL;
	struct udma_desc *d;
	u32 ring_id;
	unsigned int i;
	u64 asel;

	d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = sglen;
	d->hwdesc_count = sglen;

	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
		asel = 0;
	else
		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;

	for_each_sg(sgl, sgent, sglen, i) {
		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
		dma_addr_t sg_addr = sg_dma_address(sgent);
		struct cppi5_host_desc_t *desc;
		size_t sg_len = sg_dma_len(sgent);

		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
							   GFP_NOWAIT,
							   &hwdesc->cppi5_desc_paddr);
		if (!hwdesc->cppi5_desc_vaddr) {
			dev_err(uc->ud->dev,
				"descriptor%d allocation failed\n", i);

			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		d->residue += sg_len;
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		desc = hwdesc->cppi5_desc_vaddr;

		if (i == 0) {
			cppi5_hdesc_init(desc, 0, 0);
			/* Flow and Packet ID */
			cppi5_desc_set_pktids(&desc->hdr, uc->id,
					      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
			cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
		} else {
			cppi5_hdesc_reset_hbdesc(desc);
			cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
		}

		/* attach the sg buffer to the descriptor */
		sg_addr |= asel;
		cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);

		/* Attach link as host buffer descriptor */
		if (h_desc)
			cppi5_hdesc_link_hbdesc(h_desc,
						hwdesc->cppi5_desc_paddr | asel);

		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
		    dir == DMA_MEM_TO_DEV)
			h_desc = desc;
	}

	if (d->residue >= SZ_4M) {
		dev_err(uc->ud->dev,
			"%s: Transfer size %u is over the supported 4M range\n",
			__func__, d->residue);
		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
	cppi5_hdesc_set_pktlen(h_desc, d->residue);

	return d;
}

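/*
 * The loop above builds one CPPI5 host packet per prep call: the first
 * hardware descriptor is the packet header (carrying the flow/packet IDs
 * and the return ring), and every further SG entry becomes a host buffer
 * descriptor linked off the previous one, so the packet length is the sum
 * of all SG lengths (capped below 4M).
 */
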
static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
				void *data, size_t len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (!data || len > uc->config.metadata_size)
		return -EINVAL;

	if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
	if (d->dir == DMA_MEM_TO_DEV)
		memcpy(h_desc->epib, data, len);

	if (uc->config.needs_epib)
		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;

	d->metadata = data;
	d->metadata_size = len;
	if (uc->config.needs_epib)
		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;

	cppi5_hdesc_update_flags(h_desc, flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd_size);

	return 0;
}

static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				   size_t *payload_len, size_t *max_len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return ERR_PTR(-ENOTSUPP);

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	*max_len = uc->config.metadata_size;

	*payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
		       CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
	*payload_len += cppi5_hdesc_get_psdata_size(h_desc);

	return h_desc->epib;
}

static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
				 size_t payload_len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = payload_len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (payload_len > uc->config.metadata_size)
		return -EINVAL;

	if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	if (uc->config.needs_epib) {
		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
	}

	cppi5_hdesc_update_flags(h_desc, flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd_size);

	return 0;
}

static struct dma_descriptor_metadata_ops metadata_ops = {
	.attach = udma_attach_metadata,
	.get_ptr = udma_get_metadata_ptr,
	.set_len = udma_set_metadata_len,
};

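/*
 * Client-side sketch of the metadata flow these ops implement
 * (illustrative; "md" and its size stand in for the peripheral's
 * EPIB/psdata payload):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
 *	ret = dmaengine_desc_attach_metadata(desc, md, md_size);
 *
 * or, with the pointer-based mode:
 *
 *	ptr = dmaengine_desc_get_metadata_ptr(desc, &payload, &max);
 *	memcpy(ptr, md, md_size);
 *	ret = dmaengine_desc_set_metadata_len(desc, md_size);
 */
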
3397 | static struct dma_async_tx_descriptor * |
3398 | udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
3399 | unsigned int sglen, enum dma_transfer_direction dir, |
3400 | unsigned long tx_flags, void *context) |
3401 | { |
3402 | struct udma_chan *uc = to_udma_chan(c: chan); |
3403 | enum dma_slave_buswidth dev_width; |
3404 | struct udma_desc *d; |
3405 | u32 burst; |
3406 | |
3407 | if (dir != uc->config.dir && |
3408 | (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) { |
3409 | dev_err(chan->device->dev, |
3410 | "%s: chan%d is for %s, not supporting %s\n" , |
3411 | __func__, uc->id, |
3412 | dmaengine_get_direction_text(uc->config.dir), |
3413 | dmaengine_get_direction_text(dir)); |
3414 | return NULL; |
3415 | } |
3416 | |
3417 | if (dir == DMA_DEV_TO_MEM) { |
3418 | dev_width = uc->cfg.src_addr_width; |
3419 | burst = uc->cfg.src_maxburst; |
3420 | } else if (dir == DMA_MEM_TO_DEV) { |
3421 | dev_width = uc->cfg.dst_addr_width; |
3422 | burst = uc->cfg.dst_maxburst; |
3423 | } else { |
3424 | dev_err(chan->device->dev, "%s: bad direction?\n" , __func__); |
3425 | return NULL; |
3426 | } |
3427 | |
3428 | if (!burst) |
3429 | burst = 1; |
3430 | |
3431 | uc->config.tx_flags = tx_flags; |
3432 | |
3433 | if (uc->config.pkt_mode) |
3434 | d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, |
3435 | context); |
3436 | else if (is_slave_direction(direction: uc->config.dir)) |
3437 | d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, |
3438 | context); |
3439 | else |
3440 | d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir, |
3441 | tx_flags, context); |
3442 | |
3443 | if (!d) |
3444 | return NULL; |
3445 | |
3446 | d->dir = dir; |
3447 | d->desc_idx = 0; |
3448 | d->tr_idx = 0; |
3449 | |
3450 | /* static TR for remote PDMA */ |
	if (udma_configure_statictr(uc, d, dev_width, burst)) {
		dev_err(uc->ud->dev,
			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
			__func__, d->static_tr.bstcnt);

		udma_free_hwdesc(uc, d);
		kfree(d);
3458 | return NULL; |
3459 | } |
3460 | |
3461 | if (uc->config.metadata_size) |
3462 | d->vd.tx.metadata_ops = &metadata_ops; |
3463 | |
	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3465 | } |
3466 | |
3467 | static struct udma_desc * |
3468 | udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, |
3469 | size_t buf_len, size_t period_len, |
3470 | enum dma_transfer_direction dir, unsigned long flags) |
3471 | { |
3472 | struct udma_desc *d; |
3473 | size_t tr_size, period_addr; |
3474 | struct cppi5_tr_type1_t *tr_req; |
3475 | unsigned int periods = buf_len / period_len; |
3476 | u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; |
3477 | unsigned int i; |
3478 | int num_tr; |
3479 | |
	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
				      &tr0_cnt1, &tr1_cnt0);
	if (num_tr < 0) {
		dev_err(uc->ud->dev, "size %zu is not supported\n",
			period_len);
3485 | return NULL; |
3486 | } |
3487 | |
	/* Now allocate and set up the descriptor. */
	tr_size = sizeof(struct cppi5_tr_type1_t);
	d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
3491 | if (!d) |
3492 | return NULL; |
3493 | |
3494 | tr_req = d->hwdesc[0].tr_req_base; |
3495 | if (uc->ud->match_data->type == DMA_TYPE_UDMA) |
3496 | period_addr = buf_addr; |
3497 | else |
3498 | period_addr = buf_addr | |
3499 | ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT); |
3500 | |
3501 | for (i = 0; i < periods; i++) { |
3502 | int tr_idx = i * num_tr; |
3503 | |
		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3506 | |
3507 | tr_req[tr_idx].addr = period_addr; |
3508 | tr_req[tr_idx].icnt0 = tr0_cnt0; |
3509 | tr_req[tr_idx].icnt1 = tr0_cnt1; |
3510 | tr_req[tr_idx].dim1 = tr0_cnt0; |
3511 | |
3512 | if (num_tr == 2) { |
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);
			tr_idx++;

			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
				      false, false,
				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3520 | |
3521 | tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0; |
3522 | tr_req[tr_idx].icnt0 = tr1_cnt0; |
3523 | tr_req[tr_idx].icnt1 = 1; |
3524 | tr_req[tr_idx].dim1 = tr1_cnt0; |
3525 | } |
3526 | |
3527 | if (!(flags & DMA_PREP_INTERRUPT)) |
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3529 | CPPI5_TR_CSF_SUPR_EVT); |
3530 | |
3531 | period_addr += period_len; |
3532 | } |
3533 | |
3534 | return d; |
3535 | } |
3536 | |
3537 | static struct udma_desc * |
3538 | udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr, |
3539 | size_t buf_len, size_t period_len, |
3540 | enum dma_transfer_direction dir, unsigned long flags) |
3541 | { |
3542 | struct udma_desc *d; |
3543 | u32 ring_id; |
3544 | int i; |
3545 | int periods = buf_len / period_len; |
3546 | |
3547 | if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) |
3548 | return NULL; |
3549 | |
3550 | if (period_len >= SZ_4M) |
3551 | return NULL; |
3552 | |
3553 | d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT); |
3554 | if (!d) |
3555 | return NULL; |
3556 | |
3557 | d->hwdesc_count = periods; |
3558 | |
3559 | /* TODO: re-check this... */ |
3560 | if (dir == DMA_DEV_TO_MEM) |
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3564 | |
3565 | if (uc->ud->match_data->type != DMA_TYPE_UDMA) |
3566 | buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; |
3567 | |
3568 | for (i = 0; i < periods; i++) { |
3569 | struct udma_hwdesc *hwdesc = &d->hwdesc[i]; |
3570 | dma_addr_t period_addr = buf_addr + (period_len * i); |
3571 | struct cppi5_host_desc_t *h_desc; |
3572 | |
		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
							   GFP_NOWAIT,
							   &hwdesc->cppi5_desc_paddr);
		if (!hwdesc->cppi5_desc_vaddr) {
			dev_err(uc->ud->dev,
				"descriptor%d allocation failed\n", i);

			udma_free_hwdesc(uc, d);
			kfree(d);
3582 | return NULL; |
3583 | } |
3584 | |
3585 | hwdesc->cppi5_desc_size = uc->config.hdesc_size; |
3586 | h_desc = hwdesc->cppi5_desc_vaddr; |
3587 | |
		cppi5_hdesc_init(h_desc, 0, 0);
		cppi5_hdesc_set_pktlen(h_desc, period_len);

		/* Flow and Packet ID */
		cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
		cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);

		/* attach each period to a new descriptor */
		cppi5_hdesc_attach_buf(h_desc,
				       period_addr, period_len,
				       period_addr, period_len);
3600 | } |
3601 | |
3602 | return d; |
3603 | } |
3604 | |
3605 | static struct dma_async_tx_descriptor * |
3606 | udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
3607 | size_t period_len, enum dma_transfer_direction dir, |
3608 | unsigned long flags) |
3609 | { |
	struct udma_chan *uc = to_udma_chan(chan);
3611 | enum dma_slave_buswidth dev_width; |
3612 | struct udma_desc *d; |
3613 | u32 burst; |
3614 | |
3615 | if (dir != uc->config.dir) { |
3616 | dev_err(chan->device->dev, |
3617 | "%s: chan%d is for %s, not supporting %s\n" , |
3618 | __func__, uc->id, |
3619 | dmaengine_get_direction_text(uc->config.dir), |
3620 | dmaengine_get_direction_text(dir)); |
3621 | return NULL; |
3622 | } |
3623 | |
3624 | uc->cyclic = true; |
3625 | |
3626 | if (dir == DMA_DEV_TO_MEM) { |
3627 | dev_width = uc->cfg.src_addr_width; |
3628 | burst = uc->cfg.src_maxburst; |
3629 | } else if (dir == DMA_MEM_TO_DEV) { |
3630 | dev_width = uc->cfg.dst_addr_width; |
3631 | burst = uc->cfg.dst_maxburst; |
3632 | } else { |
		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3634 | return NULL; |
3635 | } |
3636 | |
3637 | if (!burst) |
3638 | burst = 1; |
3639 | |
3640 | if (uc->config.pkt_mode) |
3641 | d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len, |
3642 | dir, flags); |
3643 | else |
3644 | d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len, |
3645 | dir, flags); |
3646 | |
3647 | if (!d) |
3648 | return NULL; |
3649 | |
3650 | d->sglen = buf_len / period_len; |
3651 | |
3652 | d->dir = dir; |
3653 | d->residue = buf_len; |
3654 | |
3655 | /* static TR for remote PDMA */ |
	if (udma_configure_statictr(uc, d, dev_width, burst)) {
		dev_err(uc->ud->dev,
			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
			__func__, d->static_tr.bstcnt);

		udma_free_hwdesc(uc, d);
		kfree(d);
3663 | return NULL; |
3664 | } |
3665 | |
3666 | if (uc->config.metadata_size) |
3667 | d->vd.tx.metadata_ops = &metadata_ops; |
3668 | |
	return vchan_tx_prep(&uc->vc, &d->vd, flags);
3670 | } |
3671 | |
3672 | static struct dma_async_tx_descriptor * |
3673 | udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
3674 | size_t len, unsigned long tx_flags) |
3675 | { |
	struct udma_chan *uc = to_udma_chan(chan);
3677 | struct udma_desc *d; |
3678 | struct cppi5_tr_type15_t *tr_req; |
3679 | int num_tr; |
3680 | size_t tr_size = sizeof(struct cppi5_tr_type15_t); |
3681 | u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; |
3682 | u32 csf = CPPI5_TR_CSF_SUPR_EVT; |
3683 | |
3684 | if (uc->config.dir != DMA_MEM_TO_MEM) { |
3685 | dev_err(chan->device->dev, |
3686 | "%s: chan%d is for %s, not supporting %s\n" , |
3687 | __func__, uc->id, |
3688 | dmaengine_get_direction_text(uc->config.dir), |
3689 | dmaengine_get_direction_text(DMA_MEM_TO_MEM)); |
3690 | return NULL; |
3691 | } |
3692 | |
	num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
				      &tr0_cnt1, &tr1_cnt0);
	if (num_tr < 0) {
		dev_err(uc->ud->dev, "size %zu is not supported\n",
			len);
3698 | return NULL; |
3699 | } |
3700 | |
	d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3702 | if (!d) |
3703 | return NULL; |
3704 | |
3705 | d->dir = DMA_MEM_TO_MEM; |
3706 | d->desc_idx = 0; |
3707 | d->tr_idx = 0; |
3708 | d->residue = len; |
3709 | |
3710 | if (uc->ud->match_data->type != DMA_TYPE_UDMA) { |
3711 | src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; |
3712 | dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; |
3713 | } else { |
3714 | csf |= CPPI5_TR_CSF_EOL_ICNT0; |
3715 | } |
3716 | |
3717 | tr_req = d->hwdesc[0].tr_req_base; |
3718 | |
	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req[0].flags, csf);
3722 | |
3723 | tr_req[0].addr = src; |
3724 | tr_req[0].icnt0 = tr0_cnt0; |
3725 | tr_req[0].icnt1 = tr0_cnt1; |
3726 | tr_req[0].icnt2 = 1; |
3727 | tr_req[0].icnt3 = 1; |
3728 | tr_req[0].dim1 = tr0_cnt0; |
3729 | |
3730 | tr_req[0].daddr = dest; |
3731 | tr_req[0].dicnt0 = tr0_cnt0; |
3732 | tr_req[0].dicnt1 = tr0_cnt1; |
3733 | tr_req[0].dicnt2 = 1; |
3734 | tr_req[0].dicnt3 = 1; |
3735 | tr_req[0].ddim1 = tr0_cnt0; |
3736 | |
3737 | if (num_tr == 2) { |
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, csf);
3741 | |
3742 | tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; |
3743 | tr_req[1].icnt0 = tr1_cnt0; |
3744 | tr_req[1].icnt1 = 1; |
3745 | tr_req[1].icnt2 = 1; |
3746 | tr_req[1].icnt3 = 1; |
3747 | |
3748 | tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; |
3749 | tr_req[1].dicnt0 = tr1_cnt0; |
3750 | tr_req[1].dicnt1 = 1; |
3751 | tr_req[1].dicnt2 = 1; |
3752 | tr_req[1].dicnt3 = 1; |
3753 | } |
3754 | |
	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, csf | CPPI5_TR_CSF_EOP);
3756 | |
3757 | if (uc->config.metadata_size) |
3758 | d->vd.tx.metadata_ops = &metadata_ops; |
3759 | |
	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3761 | } |
3762 | |
3763 | static void udma_issue_pending(struct dma_chan *chan) |
3764 | { |
	struct udma_chan *uc = to_udma_chan(chan);
3766 | unsigned long flags; |
3767 | |
3768 | spin_lock_irqsave(&uc->vc.lock, flags); |
3769 | |
	/* If we have something pending and no active descriptor, start one */
	if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3772 | /* |
3773 | * start a descriptor if the channel is NOT [marked as |
3774 | * terminating _and_ it is still running (teardown has not |
3775 | * completed yet)]. |
3776 | */ |
3777 | if (!(uc->state == UDMA_CHAN_IS_TERMINATING && |
3778 | udma_is_chan_running(uc))) |
3779 | udma_start(uc); |
3780 | } |
3781 | |
	spin_unlock_irqrestore(&uc->vc.lock, flags);
3783 | } |
3784 | |
3785 | static enum dma_status udma_tx_status(struct dma_chan *chan, |
3786 | dma_cookie_t cookie, |
3787 | struct dma_tx_state *txstate) |
3788 | { |
	struct udma_chan *uc = to_udma_chan(chan);
3790 | enum dma_status ret; |
3791 | unsigned long flags; |
3792 | |
3793 | spin_lock_irqsave(&uc->vc.lock, flags); |
3794 | |
	ret = dma_cookie_status(chan, cookie, txstate);
3796 | |
3797 | if (!udma_is_chan_running(uc)) |
3798 | ret = DMA_COMPLETE; |
3799 | |
3800 | if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) |
3801 | ret = DMA_PAUSED; |
3802 | |
3803 | if (ret == DMA_COMPLETE || !txstate) |
3804 | goto out; |
3805 | |
3806 | if (uc->desc && uc->desc->vd.tx.cookie == cookie) { |
3807 | u32 peer_bcnt = 0; |
3808 | u32 bcnt = 0; |
3809 | u32 residue = uc->desc->residue; |
3810 | u32 delay = 0; |
3811 | |
3812 | if (uc->desc->dir == DMA_MEM_TO_DEV) { |
3813 | bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); |
3814 | |
3815 | if (uc->config.ep_type != PSIL_EP_NATIVE) { |
3816 | peer_bcnt = udma_tchanrt_read(uc, |
3817 | UDMA_CHAN_RT_PEER_BCNT_REG); |
3818 | |
3819 | if (bcnt > peer_bcnt) |
3820 | delay = bcnt - peer_bcnt; |
3821 | } |
3822 | } else if (uc->desc->dir == DMA_DEV_TO_MEM) { |
3823 | bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); |
3824 | |
3825 | if (uc->config.ep_type != PSIL_EP_NATIVE) { |
3826 | peer_bcnt = udma_rchanrt_read(uc, |
3827 | UDMA_CHAN_RT_PEER_BCNT_REG); |
3828 | |
3829 | if (peer_bcnt > bcnt) |
3830 | delay = peer_bcnt - bcnt; |
3831 | } |
3832 | } else { |
3833 | bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); |
3834 | } |
3835 | |
3836 | if (bcnt && !(bcnt % uc->desc->residue)) |
3837 | residue = 0; |
3838 | else |
3839 | residue -= bcnt % uc->desc->residue; |
3840 | |
3841 | if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { |
3842 | ret = DMA_COMPLETE; |
3843 | delay = 0; |
3844 | } |
3845 | |
		dma_set_residue(txstate, residue);
		dma_set_in_flight_bytes(txstate, delay);
3848 | |
3849 | } else { |
3850 | ret = DMA_COMPLETE; |
3851 | } |
3852 | |
3853 | out: |
	spin_unlock_irqrestore(&uc->vc.lock, flags);
3855 | return ret; |
3856 | } |
3857 | |
3858 | static int udma_pause(struct dma_chan *chan) |
3859 | { |
	struct udma_chan *uc = to_udma_chan(chan);
3861 | |
3862 | /* pause the channel */ |
3863 | switch (uc->config.dir) { |
3864 | case DMA_DEV_TO_MEM: |
3865 | udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, |
3866 | UDMA_PEER_RT_EN_PAUSE, |
3867 | UDMA_PEER_RT_EN_PAUSE); |
3868 | break; |
3869 | case DMA_MEM_TO_DEV: |
3870 | udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, |
3871 | UDMA_PEER_RT_EN_PAUSE, |
3872 | UDMA_PEER_RT_EN_PAUSE); |
3873 | break; |
3874 | case DMA_MEM_TO_MEM: |
3875 | udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, |
3876 | UDMA_CHAN_RT_CTL_PAUSE, |
3877 | UDMA_CHAN_RT_CTL_PAUSE); |
3878 | break; |
3879 | default: |
3880 | return -EINVAL; |
3881 | } |
3882 | |
3883 | return 0; |
3884 | } |
3885 | |
3886 | static int udma_resume(struct dma_chan *chan) |
3887 | { |
	struct udma_chan *uc = to_udma_chan(chan);
3889 | |
3890 | /* resume the channel */ |
3891 | switch (uc->config.dir) { |
3892 | case DMA_DEV_TO_MEM: |
3893 | udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, |
					 UDMA_PEER_RT_EN_PAUSE, 0);
3895 | |
3896 | break; |
3897 | case DMA_MEM_TO_DEV: |
3898 | udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, |
					 UDMA_PEER_RT_EN_PAUSE, 0);
3900 | break; |
3901 | case DMA_MEM_TO_MEM: |
3902 | udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, |
					 UDMA_CHAN_RT_CTL_PAUSE, 0);
3904 | break; |
3905 | default: |
3906 | return -EINVAL; |
3907 | } |
3908 | |
3909 | return 0; |
3910 | } |
3911 | |
3912 | static int udma_terminate_all(struct dma_chan *chan) |
3913 | { |
	struct udma_chan *uc = to_udma_chan(chan);
3915 | unsigned long flags; |
3916 | LIST_HEAD(head); |
3917 | |
3918 | spin_lock_irqsave(&uc->vc.lock, flags); |
3919 | |
3920 | if (udma_is_chan_running(uc)) |
3921 | udma_stop(uc); |
3922 | |
3923 | if (uc->desc) { |
3924 | uc->terminated_desc = uc->desc; |
3925 | uc->desc = NULL; |
3926 | uc->terminated_desc->terminated = true; |
		cancel_delayed_work(&uc->tx_drain.work);
3928 | } |
3929 | |
3930 | uc->paused = false; |
3931 | |
	vchan_get_all_descriptors(&uc->vc, &head);
	spin_unlock_irqrestore(&uc->vc.lock, flags);
	vchan_dma_desc_free_list(&uc->vc, &head);
3935 | |
3936 | return 0; |
3937 | } |
3938 | |
3939 | static void udma_synchronize(struct dma_chan *chan) |
3940 | { |
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long timeout = msecs_to_jiffies(1000);
3943 | |
	vchan_synchronize(&uc->vc);
3945 | |
3946 | if (uc->state == UDMA_CHAN_IS_TERMINATING) { |
		timeout = wait_for_completion_timeout(&uc->teardown_completed,
3948 | timeout); |
3949 | if (!timeout) { |
3950 | dev_warn(uc->ud->dev, "chan%d teardown timeout!\n" , |
3951 | uc->id); |
3952 | udma_dump_chan_stdata(uc); |
			udma_reset_chan(uc, true);
3954 | } |
3955 | } |
3956 | |
	udma_reset_chan(uc, false);
	if (udma_is_chan_running(uc))
		dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);

	cancel_delayed_work_sync(&uc->tx_drain.work);
3962 | udma_reset_rings(uc); |
3963 | } |
3964 | |
3965 | static void udma_desc_pre_callback(struct virt_dma_chan *vc, |
3966 | struct virt_dma_desc *vd, |
3967 | struct dmaengine_result *result) |
3968 | { |
	struct udma_chan *uc = to_udma_chan(&vc->chan);
3970 | struct udma_desc *d; |
3971 | u8 status; |
3972 | |
3973 | if (!vd) |
3974 | return; |
3975 | |
	d = to_udma_desc(&vd->tx);
3977 | |
3978 | if (d->metadata_size) |
3979 | udma_fetch_epib(uc, d); |
3980 | |
3981 | if (result) { |
		void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);

		if (cppi5_desc_get_type(desc_vaddr) ==
		    CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
			/* Provide residue information for the client */
			result->residue = d->residue -
					  cppi5_hdesc_get_pktlen(desc_vaddr);
3989 | if (result->residue) |
3990 | result->result = DMA_TRANS_ABORTED; |
3991 | else |
3992 | result->result = DMA_TRANS_NOERROR; |
3993 | } else { |
3994 | result->residue = 0; |
3995 | /* Propagate TR Response errors to the client */ |
3996 | status = d->hwdesc[0].tr_resp_base->status; |
3997 | if (status) |
3998 | result->result = DMA_TRANS_ABORTED; |
3999 | else |
4000 | result->result = DMA_TRANS_NOERROR; |
4001 | } |
4002 | } |
4003 | } |
4004 | |
4005 | /* |
4006 | * This tasklet handles the completion of a DMA descriptor by |
4007 | * calling its callback and freeing it. |
4008 | */ |
4009 | static void udma_vchan_complete(struct tasklet_struct *t) |
4010 | { |
4011 | struct virt_dma_chan *vc = from_tasklet(vc, t, task); |
4012 | struct virt_dma_desc *vd, *_vd; |
4013 | struct dmaengine_desc_callback cb; |
4014 | LIST_HEAD(head); |
4015 | |
	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
4018 | vd = vc->cyclic; |
4019 | if (vd) { |
4020 | vc->cyclic = NULL; |
		dmaengine_desc_get_callback(&vd->tx, &cb);
4022 | } else { |
4023 | memset(&cb, 0, sizeof(cb)); |
4024 | } |
	spin_unlock_irq(&vc->lock);

	udma_desc_pre_callback(vc, vd, NULL);
	dmaengine_desc_callback_invoke(&cb, NULL);
4029 | |
4030 | list_for_each_entry_safe(vd, _vd, &head, node) { |
4031 | struct dmaengine_result result; |
4032 | |
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);

		udma_desc_pre_callback(vc, vd, &result);
		dmaengine_desc_callback_invoke(&cb, &result);
4039 | |
4040 | vchan_vdesc_fini(vd); |
4041 | } |
4042 | } |
4043 | |
4044 | static void udma_free_chan_resources(struct dma_chan *chan) |
4045 | { |
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
4048 | |
4049 | udma_terminate_all(chan); |
4050 | if (uc->terminated_desc) { |
		udma_reset_chan(uc, false);
4052 | udma_reset_rings(uc); |
4053 | } |
4054 | |
	cancel_delayed_work_sync(&uc->tx_drain.work);
4056 | |
4057 | if (uc->irq_num_ring > 0) { |
4058 | free_irq(uc->irq_num_ring, uc); |
4059 | |
4060 | uc->irq_num_ring = 0; |
4061 | } |
4062 | if (uc->irq_num_udma > 0) { |
4063 | free_irq(uc->irq_num_udma, uc); |
4064 | |
4065 | uc->irq_num_udma = 0; |
4066 | } |
4067 | |
4068 | /* Release PSI-L pairing */ |
4069 | if (uc->psil_paired) { |
		navss_psil_unpair(ud, uc->config.src_thread,
				  uc->config.dst_thread);
4072 | uc->psil_paired = false; |
4073 | } |
4074 | |
	vchan_free_chan_resources(&uc->vc);
	tasklet_kill(&uc->vc.task);
4077 | |
4078 | bcdma_free_bchan_resources(uc); |
4079 | udma_free_tx_resources(uc); |
4080 | udma_free_rx_resources(uc); |
4081 | udma_reset_uchan(uc); |
4082 | |
4083 | if (uc->use_dma_pool) { |
		dma_pool_destroy(uc->hdesc_pool);
4085 | uc->use_dma_pool = false; |
4086 | } |
4087 | } |
4088 | |
4089 | static struct platform_driver udma_driver; |
4090 | static struct platform_driver bcdma_driver; |
4091 | static struct platform_driver pktdma_driver; |
4092 | |
4093 | struct udma_filter_param { |
4094 | int remote_thread_id; |
4095 | u32 atype; |
4096 | u32 asel; |
4097 | u32 tr_trigger_type; |
4098 | }; |
4099 | |
4100 | static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) |
4101 | { |
4102 | struct udma_chan_config *ucc; |
4103 | struct psil_endpoint_config *ep_config; |
4104 | struct udma_filter_param *filter_param; |
4105 | struct udma_chan *uc; |
4106 | struct udma_dev *ud; |
4107 | |
4108 | if (chan->device->dev->driver != &udma_driver.driver && |
4109 | chan->device->dev->driver != &bcdma_driver.driver && |
4110 | chan->device->dev->driver != &pktdma_driver.driver) |
4111 | return false; |
4112 | |
	uc = to_udma_chan(chan);
4114 | ucc = &uc->config; |
4115 | ud = uc->ud; |
4116 | filter_param = param; |
4117 | |
4118 | if (filter_param->atype > 2) { |
4119 | dev_err(ud->dev, "Invalid channel atype: %u\n" , |
4120 | filter_param->atype); |
4121 | return false; |
4122 | } |
4123 | |
4124 | if (filter_param->asel > 15) { |
4125 | dev_err(ud->dev, "Invalid channel asel: %u\n" , |
4126 | filter_param->asel); |
4127 | return false; |
4128 | } |
4129 | |
4130 | ucc->remote_thread_id = filter_param->remote_thread_id; |
4131 | ucc->atype = filter_param->atype; |
4132 | ucc->asel = filter_param->asel; |
4133 | ucc->tr_trigger_type = filter_param->tr_trigger_type; |
4134 | |
4135 | if (ucc->tr_trigger_type) { |
4136 | ucc->dir = DMA_MEM_TO_MEM; |
4137 | goto triggered_bchan; |
4138 | } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) { |
4139 | ucc->dir = DMA_MEM_TO_DEV; |
4140 | } else { |
4141 | ucc->dir = DMA_DEV_TO_MEM; |
4142 | } |
4143 | |
	ep_config = psil_get_ep_config(ucc->remote_thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
4147 | ucc->remote_thread_id); |
4148 | ucc->dir = DMA_MEM_TO_MEM; |
4149 | ucc->remote_thread_id = -1; |
4150 | ucc->atype = 0; |
4151 | ucc->asel = 0; |
4152 | return false; |
4153 | } |
4154 | |
4155 | if (ud->match_data->type == DMA_TYPE_BCDMA && |
4156 | ep_config->pkt_mode) { |
4157 | dev_err(ud->dev, |
4158 | "Only TR mode is supported (psi-l thread 0x%04x)\n" , |
4159 | ucc->remote_thread_id); |
4160 | ucc->dir = DMA_MEM_TO_MEM; |
4161 | ucc->remote_thread_id = -1; |
4162 | ucc->atype = 0; |
4163 | ucc->asel = 0; |
4164 | return false; |
4165 | } |
4166 | |
4167 | ucc->pkt_mode = ep_config->pkt_mode; |
4168 | ucc->channel_tpl = ep_config->channel_tpl; |
4169 | ucc->notdpkt = ep_config->notdpkt; |
4170 | ucc->ep_type = ep_config->ep_type; |
4171 | |
4172 | if (ud->match_data->type == DMA_TYPE_PKTDMA && |
4173 | ep_config->mapped_channel_id >= 0) { |
4174 | ucc->mapped_channel_id = ep_config->mapped_channel_id; |
4175 | ucc->default_flow_id = ep_config->default_flow_id; |
4176 | } else { |
4177 | ucc->mapped_channel_id = -1; |
4178 | ucc->default_flow_id = -1; |
4179 | } |
4180 | |
4181 | if (ucc->ep_type != PSIL_EP_NATIVE) { |
4182 | const struct udma_match_data *match_data = ud->match_data; |
4183 | |
4184 | if (match_data->flags & UDMA_FLAG_PDMA_ACC32) |
4185 | ucc->enable_acc32 = ep_config->pdma_acc32; |
4186 | if (match_data->flags & UDMA_FLAG_PDMA_BURST) |
4187 | ucc->enable_burst = ep_config->pdma_burst; |
4188 | } |
4189 | |
4190 | ucc->needs_epib = ep_config->needs_epib; |
4191 | ucc->psd_size = ep_config->psd_size; |
4192 | ucc->metadata_size = |
4193 | (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + |
4194 | ucc->psd_size; |
4195 | |
4196 | if (ucc->pkt_mode) |
4197 | ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + |
4198 | ucc->metadata_size, ud->desc_align); |
4199 | |
4200 | dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n" , uc->id, |
4201 | ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); |
4202 | |
4203 | return true; |
4204 | |
4205 | triggered_bchan: |
4206 | dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n" , uc->id, |
4207 | ucc->tr_trigger_type); |
4208 | |
	return true;
}
4212 | |
4213 | static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, |
4214 | struct of_dma *ofdma) |
4215 | { |
4216 | struct udma_dev *ud = ofdma->of_dma_data; |
4217 | dma_cap_mask_t mask = ud->ddev.cap_mask; |
4218 | struct udma_filter_param filter_param; |
4219 | struct dma_chan *chan; |
4220 | |
4221 | if (ud->match_data->type == DMA_TYPE_BCDMA) { |
4222 | if (dma_spec->args_count != 3) |
4223 | return NULL; |
4224 | |
4225 | filter_param.tr_trigger_type = dma_spec->args[0]; |
4226 | filter_param.remote_thread_id = dma_spec->args[1]; |
4227 | filter_param.asel = dma_spec->args[2]; |
4228 | filter_param.atype = 0; |
4229 | } else { |
4230 | if (dma_spec->args_count != 1 && dma_spec->args_count != 2) |
4231 | return NULL; |
4232 | |
4233 | filter_param.remote_thread_id = dma_spec->args[0]; |
4234 | filter_param.tr_trigger_type = 0; |
4235 | if (dma_spec->args_count == 2) { |
4236 | if (ud->match_data->type == DMA_TYPE_UDMA) { |
4237 | filter_param.atype = dma_spec->args[1]; |
4238 | filter_param.asel = 0; |
4239 | } else { |
4240 | filter_param.atype = 0; |
4241 | filter_param.asel = dma_spec->args[1]; |
4242 | } |
4243 | } else { |
4244 | filter_param.atype = 0; |
4245 | filter_param.asel = 0; |
4246 | } |
4247 | } |
4248 | |
	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
				     ofdma->of_node);
	if (!chan) {
		dev_err(ud->dev, "failed to get channel in %s\n", __func__);
		return ERR_PTR(-EINVAL);
4254 | } |
4255 | |
4256 | return chan; |
4257 | } |
4258 | |
4259 | static struct udma_match_data am654_main_data = { |
4260 | .type = DMA_TYPE_UDMA, |
4261 | .psil_base = 0x1000, |
4262 | .enable_memcpy_support = true, |
4263 | .statictr_z_mask = GENMASK(11, 0), |
4264 | .burst_size = { |
4265 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ |
4266 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ |
4267 | 0, /* No UH Channels */ |
4268 | }, |
4269 | }; |
4270 | |
4271 | static struct udma_match_data am654_mcu_data = { |
4272 | .type = DMA_TYPE_UDMA, |
4273 | .psil_base = 0x6000, |
4274 | .enable_memcpy_support = false, |
4275 | .statictr_z_mask = GENMASK(11, 0), |
4276 | .burst_size = { |
4277 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ |
4278 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ |
4279 | 0, /* No UH Channels */ |
4280 | }, |
4281 | }; |
4282 | |
4283 | static struct udma_match_data j721e_main_data = { |
4284 | .type = DMA_TYPE_UDMA, |
4285 | .psil_base = 0x1000, |
4286 | .enable_memcpy_support = true, |
4287 | .flags = UDMA_FLAGS_J7_CLASS, |
4288 | .statictr_z_mask = GENMASK(23, 0), |
4289 | .burst_size = { |
4290 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ |
4291 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */ |
4292 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */ |
4293 | }, |
4294 | }; |
4295 | |
4296 | static struct udma_match_data j721e_mcu_data = { |
4297 | .type = DMA_TYPE_UDMA, |
4298 | .psil_base = 0x6000, |
4299 | .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ |
4300 | .flags = UDMA_FLAGS_J7_CLASS, |
4301 | .statictr_z_mask = GENMASK(23, 0), |
4302 | .burst_size = { |
4303 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ |
4304 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */ |
4305 | 0, /* No UH Channels */ |
4306 | }, |
4307 | }; |
4308 | |
4309 | static struct udma_soc_data am62a_dmss_csi_soc_data = { |
4310 | .oes = { |
4311 | .bcdma_rchan_data = 0xe00, |
4312 | .bcdma_rchan_ring = 0x1000, |
4313 | }, |
4314 | }; |
4315 | |
4316 | static struct udma_soc_data j721s2_bcdma_csi_soc_data = { |
4317 | .oes = { |
4318 | .bcdma_tchan_data = 0x800, |
4319 | .bcdma_tchan_ring = 0xa00, |
4320 | .bcdma_rchan_data = 0xe00, |
4321 | .bcdma_rchan_ring = 0x1000, |
4322 | }, |
4323 | }; |
4324 | |
4325 | static struct udma_match_data am62a_bcdma_csirx_data = { |
4326 | .type = DMA_TYPE_BCDMA, |
4327 | .psil_base = 0x3100, |
4328 | .enable_memcpy_support = false, |
4329 | .burst_size = { |
4330 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ |
4331 | 0, /* No H Channels */ |
4332 | 0, /* No UH Channels */ |
4333 | }, |
4334 | .soc_data = &am62a_dmss_csi_soc_data, |
4335 | }; |
4336 | |
4337 | static struct udma_match_data am64_bcdma_data = { |
4338 | .type = DMA_TYPE_BCDMA, |
4339 | .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */ |
4340 | .enable_memcpy_support = true, /* Supported via bchan */ |
4341 | .flags = UDMA_FLAGS_J7_CLASS, |
4342 | .statictr_z_mask = GENMASK(23, 0), |
4343 | .burst_size = { |
4344 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ |
4345 | 0, /* No H Channels */ |
4346 | 0, /* No UH Channels */ |
4347 | }, |
4348 | }; |
4349 | |
4350 | static struct udma_match_data am64_pktdma_data = { |
4351 | .type = DMA_TYPE_PKTDMA, |
4352 | .psil_base = 0x1000, |
4353 | .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */ |
4354 | .flags = UDMA_FLAGS_J7_CLASS, |
4355 | .statictr_z_mask = GENMASK(23, 0), |
4356 | .burst_size = { |
4357 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ |
4358 | 0, /* No H Channels */ |
4359 | 0, /* No UH Channels */ |
4360 | }, |
4361 | }; |
4362 | |
4363 | static struct udma_match_data j721s2_bcdma_csi_data = { |
4364 | .type = DMA_TYPE_BCDMA, |
4365 | .psil_base = 0x2000, |
4366 | .enable_memcpy_support = false, |
4367 | .burst_size = { |
4368 | TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ |
4369 | 0, /* No H Channels */ |
4370 | 0, /* No UH Channels */ |
4371 | }, |
4372 | .soc_data = &j721s2_bcdma_csi_soc_data, |
4373 | }; |
4374 | |
4375 | static const struct of_device_id udma_of_match[] = { |
4376 | { |
4377 | .compatible = "ti,am654-navss-main-udmap" , |
4378 | .data = &am654_main_data, |
4379 | }, |
4380 | { |
4381 | .compatible = "ti,am654-navss-mcu-udmap" , |
4382 | .data = &am654_mcu_data, |
4383 | }, { |
4384 | .compatible = "ti,j721e-navss-main-udmap" , |
4385 | .data = &j721e_main_data, |
4386 | }, { |
4387 | .compatible = "ti,j721e-navss-mcu-udmap" , |
4388 | .data = &j721e_mcu_data, |
4389 | }, |
4390 | { |
4391 | .compatible = "ti,am64-dmss-bcdma" , |
4392 | .data = &am64_bcdma_data, |
4393 | }, |
4394 | { |
4395 | .compatible = "ti,am64-dmss-pktdma" , |
4396 | .data = &am64_pktdma_data, |
4397 | }, |
4398 | { |
4399 | .compatible = "ti,am62a-dmss-bcdma-csirx" , |
4400 | .data = &am62a_bcdma_csirx_data, |
4401 | }, |
4402 | { |
4403 | .compatible = "ti,j721s2-dmss-bcdma-csi" , |
4404 | .data = &j721s2_bcdma_csi_data, |
4405 | }, |
4406 | { /* Sentinel */ }, |
4407 | }; |
4408 | |
4409 | static struct udma_soc_data am654_soc_data = { |
4410 | .oes = { |
4411 | .udma_rchan = 0x200, |
4412 | }, |
4413 | }; |
4414 | |
4415 | static struct udma_soc_data j721e_soc_data = { |
4416 | .oes = { |
4417 | .udma_rchan = 0x400, |
4418 | }, |
4419 | }; |
4420 | |
4421 | static struct udma_soc_data j7200_soc_data = { |
4422 | .oes = { |
4423 | .udma_rchan = 0x80, |
4424 | }, |
4425 | }; |
4426 | |
4427 | static struct udma_soc_data am64_soc_data = { |
4428 | .oes = { |
4429 | .bcdma_bchan_data = 0x2200, |
4430 | .bcdma_bchan_ring = 0x2400, |
4431 | .bcdma_tchan_data = 0x2800, |
4432 | .bcdma_tchan_ring = 0x2a00, |
4433 | .bcdma_rchan_data = 0x2e00, |
4434 | .bcdma_rchan_ring = 0x3000, |
4435 | .pktdma_tchan_flow = 0x1200, |
4436 | .pktdma_rchan_flow = 0x1600, |
4437 | }, |
4438 | .bcdma_trigger_event_offset = 0xc400, |
4439 | }; |
4440 | |
4441 | static const struct soc_device_attribute k3_soc_devices[] = { |
4442 | { .family = "AM65X" , .data = &am654_soc_data }, |
4443 | { .family = "J721E" , .data = &j721e_soc_data }, |
4444 | { .family = "J7200" , .data = &j7200_soc_data }, |
4445 | { .family = "AM64X" , .data = &am64_soc_data }, |
4446 | { .family = "J721S2" , .data = &j721e_soc_data}, |
4447 | { .family = "AM62X" , .data = &am64_soc_data }, |
4448 | { .family = "AM62AX" , .data = &am64_soc_data }, |
4449 | { .family = "J784S4" , .data = &j721e_soc_data }, |
4450 | { .family = "AM62PX" , .data = &am64_soc_data }, |
4451 | { .family = "J722S" , .data = &am64_soc_data }, |
4452 | { /* sentinel */ } |
4453 | }; |
4454 | |
4455 | static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) |
4456 | { |
4457 | u32 cap2, cap3, cap4; |
4458 | int i; |
4459 | |
	ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
	if (IS_ERR(ud->mmrs[MMR_GCFG]))
		return PTR_ERR(ud->mmrs[MMR_GCFG]);

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4466 | |
4467 | switch (ud->match_data->type) { |
4468 | case DMA_TYPE_UDMA: |
4469 | ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); |
4470 | ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); |
4471 | ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2); |
4472 | ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); |
4473 | break; |
4474 | case DMA_TYPE_BCDMA: |
4475 | ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2); |
4476 | ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2); |
4477 | ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2); |
4478 | ud->rflow_cnt = ud->rchan_cnt; |
4479 | break; |
4480 | case DMA_TYPE_PKTDMA: |
		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4482 | ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); |
4483 | ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); |
4484 | ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); |
4485 | ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4); |
4486 | break; |
4487 | default: |
4488 | return -EINVAL; |
4489 | } |
4490 | |
4491 | for (i = 1; i < MMR_LAST; i++) { |
4492 | if (i == MMR_BCHANRT && ud->bchan_cnt == 0) |
4493 | continue; |
4494 | if (i == MMR_TCHANRT && ud->tchan_cnt == 0) |
4495 | continue; |
4496 | if (i == MMR_RCHANRT && ud->rchan_cnt == 0) |
4497 | continue; |
4498 | |
		ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
		if (IS_ERR(ud->mmrs[i]))
			return PTR_ERR(ud->mmrs[i]);
4502 | } |
4503 | |
4504 | return 0; |
4505 | } |
4506 | |
4507 | static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map, |
4508 | struct ti_sci_resource_desc *rm_desc, |
4509 | char *name) |
4510 | { |
	bitmap_clear(map, rm_desc->start, rm_desc->num);
	bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
	dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
4514 | rm_desc->start, rm_desc->num, rm_desc->start_sec, |
4515 | rm_desc->num_sec); |
4516 | } |
4517 | |
4518 | static const char * const range_names[] = { |
4519 | [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan" , |
4520 | [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan" , |
4521 | [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan" , |
4522 | [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow" , |
4523 | [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow" , |
4524 | }; |
4525 | |
4526 | static int udma_setup_resources(struct udma_dev *ud) |
4527 | { |
4528 | int ret, i, j; |
4529 | struct device *dev = ud->dev; |
4530 | struct ti_sci_resource *rm_res, irq_res; |
4531 | struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; |
4532 | u32 cap3; |
4533 | |
4534 | /* Set up the throughput level start indexes */ |
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (of_device_is_compatible(dev->of_node,
				    "ti,am654-navss-main-udmap")) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = 8;
	} else if (of_device_is_compatible(dev->of_node,
					   "ti,am654-navss-mcu-udmap")) {
4542 | ud->tchan_tpl.levels = 2; |
4543 | ud->tchan_tpl.start_idx[0] = 2; |
4544 | } else if (UDMA_CAP3_UCHAN_CNT(cap3)) { |
4545 | ud->tchan_tpl.levels = 3; |
4546 | ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); |
4547 | ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); |
4548 | } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { |
4549 | ud->tchan_tpl.levels = 2; |
4550 | ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); |
4551 | } else { |
4552 | ud->tchan_tpl.levels = 1; |
4553 | } |
4554 | |
4555 | ud->rchan_tpl.levels = ud->tchan_tpl.levels; |
4556 | ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; |
4557 | ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; |
4558 | |
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
						  BITS_TO_LONGS(ud->rflow_cnt),
						  sizeof(unsigned long),
						  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
4579 | |
4580 | if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || |
4581 | !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || |
4582 | !ud->rflows || !ud->rflow_in_use) |
4583 | return -ENOMEM; |
4584 | |
4585 | /* |
4586 | * RX flows with the same Ids as RX channels are reserved to be used |
4587 | * as default flows if remote HW can't generate flow_ids. Those |
4588 | * RX flows can be requested only explicitly by id. |
4589 | */ |
	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);

	/* by default no GP rflows are assigned to Linux */
	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
4594 | |
4595 | /* Get resource ranges from tisci */ |
4596 | for (i = 0; i < RM_RANGE_LAST; i++) { |
4597 | if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW) |
4598 | continue; |
4599 | |
4600 | tisci_rm->rm_ranges[i] = |
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
4604 | } |
4605 | |
4606 | /* tchan ranges */ |
4607 | rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; |
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
4616 | irq_res.sets = rm_res->sets; |
4617 | } |
4618 | |
4619 | /* rchan and matching default flow ranges */ |
4620 | rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; |
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
4629 | irq_res.sets += rm_res->sets; |
4630 | } |
4631 | |
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4633 | if (!irq_res.desc) |
4634 | return -ENOMEM; |
4635 | rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; |
	if (IS_ERR(rm_res)) {
4637 | irq_res.desc[0].start = 0; |
4638 | irq_res.desc[0].num = ud->tchan_cnt; |
4639 | i = 1; |
4640 | } else { |
4641 | for (i = 0; i < rm_res->sets; i++) { |
4642 | irq_res.desc[i].start = rm_res->desc[i].start; |
4643 | irq_res.desc[i].num = rm_res->desc[i].num; |
4644 | irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; |
4645 | irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; |
4646 | } |
4647 | } |
4648 | rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; |
	if (IS_ERR(rm_res)) {
4650 | irq_res.desc[i].start = 0; |
4651 | irq_res.desc[i].num = ud->rchan_cnt; |
4652 | } else { |
4653 | for (j = 0; j < rm_res->sets; j++, i++) { |
4654 | if (rm_res->desc[j].num) { |
4655 | irq_res.desc[i].start = rm_res->desc[j].start + |
4656 | ud->soc_data->oes.udma_rchan; |
4657 | irq_res.desc[i].num = rm_res->desc[j].num; |
4658 | } |
4659 | if (rm_res->desc[j].num_sec) { |
4660 | irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + |
4661 | ud->soc_data->oes.udma_rchan; |
4662 | irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; |
4663 | } |
4664 | } |
4665 | } |
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
4668 | if (ret) { |
4669 | dev_err(ud->dev, "Failed to allocate MSI interrupts\n" ); |
4670 | return ret; |
4671 | } |
4672 | |
4673 | /* GP rflow ranges */ |
4674 | rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; |
	if (IS_ERR(rm_res)) {
		/* all gp flows are assigned exclusively to Linux */
		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_gp_map,
						  &rm_res->desc[i], "gp-rflow");
4683 | } |
4684 | |
4685 | return 0; |
4686 | } |
4687 | |
4688 | static int bcdma_setup_resources(struct udma_dev *ud) |
4689 | { |
4690 | int ret, i, j; |
4691 | struct device *dev = ud->dev; |
4692 | struct ti_sci_resource *rm_res, irq_res; |
4693 | struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; |
4694 | const struct udma_oes_offsets *oes = &ud->soc_data->oes; |
4695 | u32 cap; |
4696 | |
4697 | /* Set up the throughput level start indexes */ |
	cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4699 | if (BCDMA_CAP3_UBCHAN_CNT(cap)) { |
4700 | ud->bchan_tpl.levels = 3; |
4701 | ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap); |
4702 | ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); |
4703 | } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) { |
4704 | ud->bchan_tpl.levels = 2; |
4705 | ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); |
4706 | } else { |
4707 | ud->bchan_tpl.levels = 1; |
4708 | } |
4709 | |
	cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4711 | if (BCDMA_CAP4_URCHAN_CNT(cap)) { |
4712 | ud->rchan_tpl.levels = 3; |
4713 | ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap); |
4714 | ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); |
4715 | } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) { |
4716 | ud->rchan_tpl.levels = 2; |
4717 | ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); |
4718 | } else { |
4719 | ud->rchan_tpl.levels = 1; |
4720 | } |
4721 | |
4722 | if (BCDMA_CAP4_UTCHAN_CNT(cap)) { |
4723 | ud->tchan_tpl.levels = 3; |
4724 | ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap); |
4725 | ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); |
4726 | } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) { |
4727 | ud->tchan_tpl.levels = 2; |
4728 | ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); |
4729 | } else { |
4730 | ud->tchan_tpl.levels = 1; |
4731 | } |
4732 | |
	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
				  GFP_KERNEL);
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	/* BCDMA does not really have flows, but the driver expects them */
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
4751 | |
4752 | if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map || |
4753 | !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans || |
4754 | !ud->rflows) |
4755 | return -ENOMEM; |
4756 | |
4757 | /* Get resource ranges from tisci */ |
4758 | for (i = 0; i < RM_RANGE_LAST; i++) { |
4759 | if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW) |
4760 | continue; |
4761 | if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0) |
4762 | continue; |
4763 | if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0) |
4764 | continue; |
4765 | if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0) |
4766 | continue; |
4767 | |
4768 | tisci_rm->rm_ranges[i] = |
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
4772 | } |
4773 | |
4774 | irq_res.sets = 0; |
4775 | |
4776 | /* bchan ranges */ |
4777 | if (ud->bchan_cnt) { |
4778 | rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; |
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->bchan_map, ud->bchan_cnt);
			irq_res.sets++;
		} else {
			bitmap_fill(ud->bchan_map, ud->bchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->bchan_map,
							  &rm_res->desc[i],
							  "bchan");
4788 | irq_res.sets += rm_res->sets; |
4789 | } |
4790 | } |
4791 | |
4792 | /* tchan ranges */ |
4793 | if (ud->tchan_cnt) { |
4794 | rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; |
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->tchan_map, ud->tchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->tchan_map, ud->tchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->tchan_map,
							  &rm_res->desc[i],
							  "tchan");
4804 | irq_res.sets += rm_res->sets * 2; |
4805 | } |
4806 | } |
4807 | |
4808 | /* rchan ranges */ |
4809 | if (ud->rchan_cnt) { |
4810 | rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; |
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->rchan_map, ud->rchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->rchan_map, ud->rchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->rchan_map,
							  &rm_res->desc[i],
							  "rchan");
4820 | irq_res.sets += rm_res->sets * 2; |
4821 | } |
4822 | } |
4823 | |
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4825 | if (!irq_res.desc) |
4826 | return -ENOMEM; |
4827 | if (ud->bchan_cnt) { |
4828 | rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; |
		if (IS_ERR(rm_res)) {
4830 | irq_res.desc[0].start = oes->bcdma_bchan_ring; |
4831 | irq_res.desc[0].num = ud->bchan_cnt; |
4832 | i = 1; |
4833 | } else { |
4834 | for (i = 0; i < rm_res->sets; i++) { |
4835 | irq_res.desc[i].start = rm_res->desc[i].start + |
4836 | oes->bcdma_bchan_ring; |
4837 | irq_res.desc[i].num = rm_res->desc[i].num; |
4838 | } |
4839 | } |
4840 | } else { |
4841 | i = 0; |
4842 | } |
4843 | |
4844 | if (ud->tchan_cnt) { |
4845 | rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; |
		if (IS_ERR(rm_res)) {
4847 | irq_res.desc[i].start = oes->bcdma_tchan_data; |
4848 | irq_res.desc[i].num = ud->tchan_cnt; |
4849 | irq_res.desc[i + 1].start = oes->bcdma_tchan_ring; |
4850 | irq_res.desc[i + 1].num = ud->tchan_cnt; |
4851 | i += 2; |
4852 | } else { |
4853 | for (j = 0; j < rm_res->sets; j++, i += 2) { |
4854 | irq_res.desc[i].start = rm_res->desc[j].start + |
4855 | oes->bcdma_tchan_data; |
4856 | irq_res.desc[i].num = rm_res->desc[j].num; |
4857 | |
4858 | irq_res.desc[i + 1].start = rm_res->desc[j].start + |
4859 | oes->bcdma_tchan_ring; |
4860 | irq_res.desc[i + 1].num = rm_res->desc[j].num; |
4861 | } |
4862 | } |
4863 | } |
4864 | if (ud->rchan_cnt) { |
4865 | rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; |
		if (IS_ERR(rm_res)) {
4867 | irq_res.desc[i].start = oes->bcdma_rchan_data; |
4868 | irq_res.desc[i].num = ud->rchan_cnt; |
4869 | irq_res.desc[i + 1].start = oes->bcdma_rchan_ring; |
4870 | irq_res.desc[i + 1].num = ud->rchan_cnt; |
4871 | i += 2; |
4872 | } else { |
4873 | for (j = 0; j < rm_res->sets; j++, i += 2) { |
4874 | irq_res.desc[i].start = rm_res->desc[j].start + |
4875 | oes->bcdma_rchan_data; |
4876 | irq_res.desc[i].num = rm_res->desc[j].num; |
4877 | |
4878 | irq_res.desc[i + 1].start = rm_res->desc[j].start + |
4879 | oes->bcdma_rchan_ring; |
4880 | irq_res.desc[i + 1].num = rm_res->desc[j].num; |
4881 | } |
4882 | } |
4883 | } |
4884 | |
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
4887 | if (ret) { |
4888 | dev_err(ud->dev, "Failed to allocate MSI interrupts\n" ); |
4889 | return ret; |
4890 | } |
4891 | |
4892 | return 0; |
4893 | } |
4894 | |
4895 | static int pktdma_setup_resources(struct udma_dev *ud) |
4896 | { |
4897 | int ret, i, j; |
4898 | struct device *dev = ud->dev; |
4899 | struct ti_sci_resource *rm_res, irq_res; |
4900 | struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; |
4901 | const struct udma_oes_offsets *oes = &ud->soc_data->oes; |
4902 | u32 cap3; |
4903 | |
4904 | /* Set up the throughput level start indexes */ |
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4906 | if (UDMA_CAP3_UCHAN_CNT(cap3)) { |
4907 | ud->tchan_tpl.levels = 3; |
4908 | ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); |
4909 | ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); |
4910 | } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { |
4911 | ud->tchan_tpl.levels = 2; |
4912 | ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); |
4913 | } else { |
4914 | ud->tchan_tpl.levels = 1; |
4915 | } |
4916 | |
4917 | ud->rchan_tpl.levels = ud->tchan_tpl.levels; |
4918 | ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; |
4919 | ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; |
4920 | |
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
4936 | |
4937 | if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans || |
4938 | !ud->rchans || !ud->rflows || !ud->rflow_in_use) |
4939 | return -ENOMEM; |
4940 | |
4941 | /* Get resource ranges from tisci */ |
4942 | for (i = 0; i < RM_RANGE_LAST; i++) { |
4943 | if (i == RM_RANGE_BCHAN) |
4944 | continue; |
4945 | |
4946 | tisci_rm->rm_ranges[i] = |
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
4950 | } |
4951 | |
4952 | /* tchan ranges */ |
4953 | rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; |
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
4961 | } |
4962 | |
4963 | /* rchan ranges */ |
4964 | rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; |
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
4972 | } |
4973 | |
4974 | /* rflow ranges */ |
4975 | rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; |
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_in_use,
						  &rm_res->desc[i], "rflow");
4985 | irq_res.sets = rm_res->sets; |
4986 | } |
4987 | |
4988 | /* tflow ranges */ |
4989 | rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; |
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tflow_map,
						  &rm_res->desc[i], "tflow");
4999 | irq_res.sets += rm_res->sets; |
5000 | } |
5001 | |
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
5003 | if (!irq_res.desc) |
5004 | return -ENOMEM; |
5005 | rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; |
	if (IS_ERR(rm_res)) {
5007 | irq_res.desc[0].start = oes->pktdma_tchan_flow; |
5008 | irq_res.desc[0].num = ud->tflow_cnt; |
5009 | i = 1; |
5010 | } else { |
5011 | for (i = 0; i < rm_res->sets; i++) { |
5012 | irq_res.desc[i].start = rm_res->desc[i].start + |
5013 | oes->pktdma_tchan_flow; |
5014 | irq_res.desc[i].num = rm_res->desc[i].num; |
5015 | } |
5016 | } |
5017 | rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; |
	if (IS_ERR(rm_res)) {
5019 | irq_res.desc[i].start = oes->pktdma_rchan_flow; |
5020 | irq_res.desc[i].num = ud->rflow_cnt; |
5021 | } else { |
5022 | for (j = 0; j < rm_res->sets; j++, i++) { |
5023 | irq_res.desc[i].start = rm_res->desc[j].start + |
5024 | oes->pktdma_rchan_flow; |
5025 | irq_res.desc[i].num = rm_res->desc[j].num; |
5026 | } |
5027 | } |
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
5030 | if (ret) { |
5031 | dev_err(ud->dev, "Failed to allocate MSI interrupts\n" ); |
5032 | return ret; |
5033 | } |
5034 | |
5035 | return 0; |
5036 | } |
5037 | |
static int setup_resources(struct udma_dev *ud)
{
	struct device *dev = ud->dev;
	int ch_count, ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_setup_resources(ud);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_setup_resources(ud);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_setup_resources(ud);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
	if (ud->bchan_cnt)
		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt),
			 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
						       ud->rflow_cnt));
		break;
	case DMA_TYPE_BCDMA:
		dev_info(dev,
			 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
						       ud->bchan_cnt),
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	case DMA_TYPE_PKTDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	default:
		break;
	}

	return ch_count;
}

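/*
 * udma_setup_rx_flush - prepare what is needed to flush an RX channel on
 * teardown: a dummy buffer to receive the discarded data plus one pre-built
 * descriptor for TR mode and one for packet mode, so that either channel
 * type can be drained.
 */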
static int udma_setup_rx_flush(struct udma_dev *ud)
{
	struct udma_rx_flush *rx_flush = &ud->rx_flush;
	struct cppi5_desc_hdr_t *tr_desc;
	struct cppi5_tr_type1_t *tr_req;
	struct cppi5_host_desc_t *desc;
	struct device *dev = ud->dev;
	struct udma_hwdesc *hwdesc;
	size_t tr_size;

	/* Allocate 1K buffer for discarded data on RX channel teardown */
	rx_flush->buffer_size = SZ_1K;
	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
					      GFP_KERNEL);
	if (!rx_flush->buffer_vaddr)
		return -ENOMEM;

	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
						rx_flush->buffer_size,
						DMA_TO_DEVICE);
	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
		return -ENOMEM;

	/* Set up descriptor to be used for TR mode */
	hwdesc = &rx_flush->hwdescs[0];
	tr_size = sizeof(struct cppi5_tr_type1_t);
	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;

	tr_desc = hwdesc->cppi5_desc_vaddr;
	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, 0);

	tr_req = hwdesc->tr_req_base;
	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req->addr = rx_flush->buffer_paddr;
	tr_req->icnt0 = rx_flush->buffer_size;
	tr_req->icnt1 = 1;

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	/* Set up descriptor to be used for packet mode */
	hwdesc = &rx_flush->hwdescs[1];
	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					CPPI5_INFO0_HDESC_EPIB_SIZE +
					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	desc = hwdesc->cppi5_desc_vaddr;
	cppi5_hdesc_init(desc, 0, 0);
	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);

	cppi5_hdesc_attach_buf(desc,
			       rx_flush->buffer_paddr, rx_flush->buffer_size,
			       rx_flush->buffer_paddr, rx_flush->buffer_size);

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
	return 0;
}

#ifdef CONFIG_DEBUG_FS
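/*
 * Print a one line summary for a channel that has a client: its name, the
 * direction or trigger, the hardware resources backing it and the endpoint
 * configuration.
 */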
static void udma_dbg_summary_show_chan(struct seq_file *s,
				       struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_chan_config *ucc = &uc->config;

	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
		   chan->dbg_client_name ?: "in-use");
	if (ucc->tr_trigger_type)
		seq_puts(s, " (triggered, ");
	else
		seq_printf(s, " (%s, ",
			   dmaengine_get_direction_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
			seq_printf(s, "bchan%d)\n", uc->bchan->id);
			return;
		}

		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_DEV_TO_MEM:
		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "rflow%d, ", uc->rflow->id);
		break;
	case DMA_MEM_TO_DEV:
		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
		break;
	default:
		seq_printf(s, ")\n");
		return;
	}

	if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_printf(s, "PSI-L Native");
		if (ucc->metadata_size) {
			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
			if (ucc->psd_size)
				seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_printf(s, " ]");
		}
	} else {
		seq_printf(s, "PDMA");
		if (ucc->enable_acc32 || ucc->enable_burst)
			seq_printf(s, "[%s%s ]",
				   ucc->enable_acc32 ? " ACC32" : "",
				   ucc->enable_burst ? " BURST" : "");
	}

	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
}

static void udma_dbg_summary_show(struct seq_file *s,
				  struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count)
			udma_dbg_summary_show_chan(s, chan);
	}
}
#endif /* CONFIG_DEBUG_FS */

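/*
 * The memcpy alignment is derived from the burst size of the highest
 * throughput level (TPL) channel that memcpy can use; when memcpy support
 * is disabled the default 8 byte alignment is reported.
 */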
static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
{
	const struct udma_match_data *match_data = ud->match_data;
	u8 tpl;

	if (!match_data->enable_memcpy_support)
		return DMAENGINE_ALIGN_8_BYTES;

	/* Get the highest TPL level the device supports for memcpy */
	if (ud->bchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
	else if (ud->tchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
	else
		return DMAENGINE_ALIGN_8_BYTES;

	switch (match_data->burst_size[tpl]) {
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
		return DMAENGINE_ALIGN_256_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
		return DMAENGINE_ALIGN_128_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
		fallthrough;
	default:
		return DMAENGINE_ALIGN_64_BYTES;
	}
}

#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

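/*
 * Probe: look up the match and SoC data, map the MMR regions, acquire the
 * TI-SCI and ring accelerator handles, wire up the dmaengine callbacks for
 * the given DMA type, then initialize the channels and register the DMA
 * controller.
 */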
static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	struct udma_dev *ud;
	const struct of_device_id *match;
	int i, ret;
	int ch_count;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		dev_err(dev, "failed to set dma mask\n");

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match) {
		dev_err(dev, "No compatible match found\n");
		return -ENODEV;
	}
	ud->match_data = match->data;

	ud->soc_data = ud->match_data->soc_data;
	if (!ud->soc_data) {
		soc = soc_device_match(k3_soc_devices);
		if (!soc) {
			dev_err(dev, "No compatible SoC found\n");
			return -ENODEV;
		}
		ud->soc_data = soc->data;
	}

	ret = udma_get_mmrs(pdev, ud);
	if (ret)
		return ret;

	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
					   &ud->atype);
		if (!ret && ud->atype > 2) {
			dev_err(dev, "Invalid atype: %u\n", ud->atype);
			return -EINVAL;
		}
	} else {
		ret = of_property_read_u32(dev->of_node, "ti,asel",
					   &ud->asel);
		if (!ret && ud->asel > 15) {
			dev_err(dev, "Invalid asel: %u\n", ud->asel);
			return -EINVAL;
		}
	}

	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	} else {
		struct k3_ringacc_init_data ring_init_data;

		ring_init_data.tisci = ud->tisci_rm.tisci;
		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
		if (ud->match_data->type == DMA_TYPE_BCDMA) {
			ring_init_data.num_rings = ud->bchan_cnt +
						   ud->tchan_cnt +
						   ud->rchan_cnt;
		} else {
			ring_init_data.num_rings = ud->rflow_cnt +
						   ud->tflow_cnt;
		}

		ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
	}

	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi.domain)
		return -EPROBE_DEFER;

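	/* dmaengine capabilities and callbacks common to all K3 DMA types */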
	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	/* cyclic operation is not supported via PKTDMA */
	if (ud->match_data->type != DMA_TYPE_PKTDMA) {
		dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	}

	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->ddev.device_alloc_chan_resources =
					udma_alloc_chan_resources;
		break;
	case DMA_TYPE_BCDMA:
		ud->ddev.device_alloc_chan_resources =
					bcdma_alloc_chan_resources;
		ud->ddev.device_router_config = bcdma_router_config;
		break;
	case DMA_TYPE_PKTDMA:
		ud->ddev.device_alloc_chan_resources =
					pktdma_alloc_chan_resources;
		break;
	default:
		return -EINVAL;
	}
	ud->ddev.device_free_chan_resources = udma_free_chan_resources;

	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support &&
	    !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	ch_count = setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

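	/*
	 * Descriptors have to be aligned to at least 64 bytes, or to the
	 * cache line size if that is larger.
	 */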
	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;

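	/*
	 * Each channel's realtime registers occupy their own 0x1000 byte
	 * window within the corresponding MMR region.
	 */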
	for (i = 0; i < ud->bchan_cnt; i++) {
		struct udma_bchan *bchan = &ud->bchans[i];

		bchan->id = i;
		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->bchan = NULL;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.mapped_channel_id = -1;
		uc->config.default_flow_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_setup(&uc->vc.task, udma_vchan_complete);
		init_completion(&uc->teardown_completed);
		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
	}

	/* Configure the copy_align to the maximum burst size the device supports */
	ud->ddev.copy_align = udma_get_copy_align(ud);

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}

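/*
 * System sleep: at late suspend every channel that has a client is torn
 * down after its configuration has been saved; at resume the channels are
 * re-allocated from the saved configuration.
 */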
static int __maybe_unused udma_pm_suspend(struct device *dev)
{
	struct udma_dev *ud = dev_get_drvdata(dev);
	struct dma_device *dma_dev = &ud->ddev;
	struct dma_chan *chan;
	struct udma_chan *uc;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			uc = to_udma_chan(chan);
			/* backup the channel configuration */
			memcpy(&uc->backup_config, &uc->config,
			       sizeof(struct udma_chan_config));
			dev_dbg(dev, "Suspending channel %s\n",
				dma_chan_name(chan));
			ud->ddev.device_free_chan_resources(chan);
		}
	}

	return 0;
}

static int __maybe_unused udma_pm_resume(struct device *dev)
{
	struct udma_dev *ud = dev_get_drvdata(dev);
	struct dma_device *dma_dev = &ud->ddev;
	struct dma_chan *chan;
	struct udma_chan *uc;
	int ret;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			uc = to_udma_chan(chan);
			/* restore the channel configuration */
			memcpy(&uc->config, &uc->backup_config,
			       sizeof(struct udma_chan_config));
			dev_dbg(dev, "Resuming channel %s\n",
				dma_chan_name(chan));
			ret = ud->ddev.device_alloc_chan_resources(chan);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static const struct dev_pm_ops udma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(udma_pm_suspend, udma_pm_resume)
};

static struct platform_driver udma_driver = {
	.driver = {
		.name = "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
		.pm = &udma_pm_ops,
	},
	.probe = udma_probe,
};

module_platform_driver(udma_driver);
MODULE_LICENSE("GPL v2");

/* Private interfaces to UDMA */
#include "k3-udma-private.c"