1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * TI EDMA DMA engine driver |
4 | * |
5 | * Copyright 2012 Texas Instruments |
6 | */ |
7 | |
8 | #include <linux/dmaengine.h> |
9 | #include <linux/dma-mapping.h> |
10 | #include <linux/bitmap.h> |
11 | #include <linux/err.h> |
12 | #include <linux/init.h> |
13 | #include <linux/interrupt.h> |
14 | #include <linux/list.h> |
15 | #include <linux/module.h> |
16 | #include <linux/platform_device.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/spinlock.h> |
19 | #include <linux/of.h> |
20 | #include <linux/of_dma.h> |
21 | #include <linux/of_irq.h> |
22 | #include <linux/of_address.h> |
23 | #include <linux/pm_runtime.h> |
24 | |
25 | #include <linux/platform_data/edma.h> |
26 | |
27 | #include "../dmaengine.h" |
28 | #include "../virt-dma.h" |
29 | |
30 | /* Offsets matching "struct edmacc_param" */ |
31 | #define PARM_OPT 0x00 |
32 | #define PARM_SRC 0x04 |
33 | #define PARM_A_B_CNT 0x08 |
34 | #define PARM_DST 0x0c |
35 | #define PARM_SRC_DST_BIDX 0x10 |
36 | #define PARM_LINK_BCNTRLD 0x14 |
37 | #define PARM_SRC_DST_CIDX 0x18 |
38 | #define PARM_CCNT 0x1c |
39 | |
40 | #define PARM_SIZE 0x20 |
41 | |
42 | /* Offsets for EDMA CC global channel registers and their shadows */ |
43 | #define SH_ER 0x00 /* 64 bits */ |
44 | #define SH_ECR 0x08 /* 64 bits */ |
45 | #define SH_ESR 0x10 /* 64 bits */ |
46 | #define SH_CER 0x18 /* 64 bits */ |
47 | #define SH_EER 0x20 /* 64 bits */ |
48 | #define SH_EECR 0x28 /* 64 bits */ |
49 | #define SH_EESR 0x30 /* 64 bits */ |
50 | #define SH_SER 0x38 /* 64 bits */ |
51 | #define SH_SECR 0x40 /* 64 bits */ |
52 | #define SH_IER 0x50 /* 64 bits */ |
53 | #define SH_IECR 0x58 /* 64 bits */ |
54 | #define SH_IESR 0x60 /* 64 bits */ |
55 | #define SH_IPR 0x68 /* 64 bits */ |
56 | #define SH_ICR 0x70 /* 64 bits */ |
57 | #define SH_IEVAL 0x78 |
58 | #define SH_QER 0x80 |
59 | #define SH_QEER 0x84 |
60 | #define SH_QEECR 0x88 |
61 | #define SH_QEESR 0x8c |
62 | #define SH_QSER 0x90 |
63 | #define SH_QSECR 0x94 |
64 | #define SH_SIZE 0x200 |
65 | |
66 | /* Offsets for EDMA CC global registers */ |
67 | #define EDMA_REV 0x0000 |
68 | #define EDMA_CCCFG 0x0004 |
69 | #define EDMA_QCHMAP 0x0200 /* 8 registers */ |
70 | #define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */ |
71 | #define EDMA_QDMAQNUM 0x0260 |
72 | #define EDMA_QUETCMAP 0x0280 |
73 | #define EDMA_QUEPRI 0x0284 |
74 | #define EDMA_EMR 0x0300 /* 64 bits */ |
75 | #define EDMA_EMCR 0x0308 /* 64 bits */ |
76 | #define EDMA_QEMR 0x0310 |
77 | #define EDMA_QEMCR 0x0314 |
78 | #define EDMA_CCERR 0x0318 |
79 | #define EDMA_CCERRCLR 0x031c |
80 | #define EDMA_EEVAL 0x0320 |
#define EDMA_DRAE		0x0340	/* 4 x 64 bits */
82 | #define EDMA_QRAE 0x0380 /* 4 registers */ |
83 | #define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */ |
84 | #define EDMA_QSTAT 0x0600 /* 2 registers */ |
85 | #define EDMA_QWMTHRA 0x0620 |
86 | #define EDMA_QWMTHRB 0x0624 |
87 | #define EDMA_CCSTAT 0x0640 |
88 | |
89 | #define EDMA_M 0x1000 /* global channel registers */ |
90 | #define EDMA_ECR 0x1008 |
91 | #define EDMA_ECRH 0x100C |
92 | #define EDMA_SHADOW0 0x2000 /* 4 shadow regions */ |
93 | #define EDMA_PARM 0x4000 /* PaRAM entries */ |
94 | |
95 | #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) |
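/*
 * For example (illustrative), PaRAM slot 3 lives at
 * EDMA_PARM + (3 << 5) = 0x4060, matching PARM_SIZE (0x20 bytes per slot).
 */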
96 | |
97 | #define EDMA_DCHMAP 0x0100 /* 64 registers */ |
98 | |
99 | /* CCCFG register */ |
100 | #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ |
101 | #define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */ |
102 | #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ |
103 | #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ |
104 | #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ |
105 | #define CHMAP_EXIST BIT(24) |
106 | |
107 | /* CCSTAT register */ |
108 | #define EDMA_CCSTAT_ACTV BIT(4) |
109 | |
110 | /* |
111 | * Max of 20 segments per channel to conserve PaRAM slots |
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
116 | */ |
117 | #define MAX_NR_SG 20 |
118 | #define EDMA_MAX_SLOTS MAX_NR_SG |
119 | #define EDMA_DESCRIPTORS 16 |
120 | |
121 | #define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ |
122 | #define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ |
123 | #define EDMA_CONT_PARAMS_ANY 1001 |
124 | #define EDMA_CONT_PARAMS_FIXED_EXACT 1002 |
125 | #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 |
126 | |
127 | /* |
128 | * 64bit array registers are split into two 32bit registers: |
129 | * reg0: channel/event 0-31 |
130 | * reg1: channel/event 32-63 |
131 | * |
 * bit 5 of the channel number selects the array index (0/1)
 * bits 0-4 (0x1f) give the bit offset within the register
134 | */ |
135 | #define EDMA_REG_ARRAY_INDEX(channel) ((channel) >> 5) |
136 | #define EDMA_CHANNEL_BIT(channel) (BIT((channel) & 0x1f)) |
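/*
 * For example (illustrative), channel 40: EDMA_REG_ARRAY_INDEX(40) = 1
 * and EDMA_CHANNEL_BIT(40) = BIT(8), i.e. bit 8 of the second
 * (channels 32-63) register.
 */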
137 | |
138 | /* PaRAM slots are laid out like this */ |
139 | struct edmacc_param { |
140 | u32 opt; |
141 | u32 src; |
142 | u32 a_b_cnt; |
143 | u32 dst; |
144 | u32 src_dst_bidx; |
145 | u32 link_bcntrld; |
146 | u32 src_dst_cidx; |
147 | u32 ccnt; |
148 | } __packed; |
149 | |
150 | /* fields in edmacc_param.opt */ |
151 | #define SAM BIT(0) |
152 | #define DAM BIT(1) |
153 | #define SYNCDIM BIT(2) |
154 | #define STATIC BIT(3) |
155 | #define EDMA_FWID (0x07 << 8) |
156 | #define TCCMODE BIT(11) |
157 | #define EDMA_TCC(t) ((t) << 12) |
158 | #define TCINTEN BIT(20) |
159 | #define ITCINTEN BIT(21) |
160 | #define TCCHEN BIT(22) |
161 | #define ITCCHEN BIT(23) |
162 | |
163 | struct edma_pset { |
164 | u32 len; |
165 | dma_addr_t addr; |
166 | struct edmacc_param param; |
167 | }; |
168 | |
169 | struct edma_desc { |
170 | struct virt_dma_desc vdesc; |
171 | struct list_head node; |
172 | enum dma_transfer_direction direction; |
173 | int cyclic; |
174 | bool polled; |
175 | int absync; |
176 | int pset_nr; |
177 | struct edma_chan *echan; |
178 | int processed; |
179 | |
180 | /* |
181 | * The following 4 elements are used for residue accounting. |
182 | * |
183 | * - processed_stat: the number of SG elements we have traversed |
184 | * so far to cover accounting. This is updated directly to processed |
185 | * during edma_callback and is always <= processed, because processed |
	 * refers to the number of transfers pending (programmed to the EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
189 | * |
190 | * - residue: The amount of bytes we have left to transfer for this desc |
191 | * |
192 | * - residue_stat: The residue in bytes of data we have covered |
193 | * so far for accounting. This is updated directly to residue |
194 | * during callbacks to keep it current. |
195 | * |
196 | * - sg_len: Tracks the length of the current intermediate transfer, |
197 | * this is required to update the residue during intermediate transfer |
198 | * completion callback. |
199 | */ |
200 | int processed_stat; |
201 | u32 sg_len; |
202 | u32 residue; |
203 | u32 residue_stat; |
204 | |
205 | struct edma_pset pset[] __counted_by(pset_nr); |
206 | }; |
207 | |
208 | struct edma_cc; |
209 | |
210 | struct edma_tc { |
211 | struct device_node *node; |
212 | u16 id; |
213 | }; |
214 | |
215 | struct edma_chan { |
216 | struct virt_dma_chan vchan; |
217 | struct list_head node; |
218 | struct edma_desc *edesc; |
219 | struct edma_cc *ecc; |
220 | struct edma_tc *tc; |
221 | int ch_num; |
222 | bool alloced; |
223 | bool hw_triggered; |
224 | int slot[EDMA_MAX_SLOTS]; |
225 | int missed; |
226 | struct dma_slave_config cfg; |
227 | }; |
228 | |
229 | struct edma_cc { |
230 | struct device *dev; |
231 | struct edma_soc_info *info; |
232 | void __iomem *base; |
233 | int id; |
234 | bool legacy_mode; |
235 | |
236 | /* eDMA3 resource information */ |
237 | unsigned num_channels; |
238 | unsigned num_qchannels; |
239 | unsigned num_region; |
240 | unsigned num_slots; |
241 | unsigned num_tc; |
242 | bool chmap_exist; |
243 | enum dma_event_q default_queue; |
244 | |
245 | unsigned int ccint; |
246 | unsigned int ccerrint; |
247 | |
248 | /* |
249 | * The slot_inuse bit for each PaRAM slot is clear unless the slot is |
250 | * in use by Linux or if it is allocated to be used by DSP. |
251 | */ |
252 | unsigned long *slot_inuse; |
253 | |
254 | /* |
255 | * For tracking reserved channels used by DSP. |
256 | * If the bit is cleared, the channel is allocated to be used by DSP |
257 | * and Linux must not touch it. |
258 | */ |
259 | unsigned long *channels_mask; |
260 | |
261 | struct dma_device dma_slave; |
262 | struct dma_device *dma_memcpy; |
263 | struct edma_chan *slave_chans; |
264 | struct edma_tc *tc_list; |
265 | int dummy_slot; |
266 | }; |
267 | |
268 | /* dummy param set used to (re)initialize parameter RAM slots */ |
269 | static const struct edmacc_param dummy_paramset = { |
270 | .link_bcntrld = 0xffff, |
271 | .ccnt = 1, |
272 | }; |
273 | |
274 | #define EDMA_BINDING_LEGACY 0 |
275 | #define EDMA_BINDING_TPCC 1 |
276 | static const u32 edma_binding_type[] = { |
277 | [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY, |
278 | [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC, |
279 | }; |
280 | |
281 | static const struct of_device_id edma_of_ids[] = { |
282 | { |
283 | .compatible = "ti,edma3" , |
284 | .data = &edma_binding_type[EDMA_BINDING_LEGACY], |
285 | }, |
286 | { |
287 | .compatible = "ti,edma3-tpcc" , |
288 | .data = &edma_binding_type[EDMA_BINDING_TPCC], |
289 | }, |
290 | {} |
291 | }; |
292 | MODULE_DEVICE_TABLE(of, edma_of_ids); |
293 | |
294 | static const struct of_device_id edma_tptc_of_ids[] = { |
295 | { .compatible = "ti,edma3-tptc" , }, |
296 | {} |
297 | }; |
298 | MODULE_DEVICE_TABLE(of, edma_tptc_of_ids); |
299 | |
300 | static inline unsigned int edma_read(struct edma_cc *ecc, int offset) |
301 | { |
	return (unsigned int)__raw_readl(ecc->base + offset);
303 | } |
304 | |
305 | static inline void edma_write(struct edma_cc *ecc, int offset, int val) |
306 | { |
	__raw_writel(val, ecc->base + offset);
308 | } |
309 | |
310 | static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and, |
311 | unsigned or) |
312 | { |
313 | unsigned val = edma_read(ecc, offset); |
314 | |
315 | val &= and; |
316 | val |= or; |
317 | edma_write(ecc, offset, val); |
318 | } |
319 | |
320 | static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or) |
321 | { |
322 | unsigned val = edma_read(ecc, offset); |
323 | |
324 | val |= or; |
325 | edma_write(ecc, offset, val); |
326 | } |
327 | |
328 | static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset, |
329 | int i) |
330 | { |
	return edma_read(ecc, offset + (i << 2));
332 | } |
333 | |
334 | static inline void edma_write_array(struct edma_cc *ecc, int offset, int i, |
335 | unsigned val) |
336 | { |
	edma_write(ecc, offset + (i << 2), val);
338 | } |
339 | |
340 | static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i, |
341 | unsigned and, unsigned or) |
342 | { |
	edma_modify(ecc, offset + (i << 2), and, or);
344 | } |
345 | |
346 | static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j, |
347 | unsigned or) |
348 | { |
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
350 | } |
351 | |
352 | static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i, |
353 | int j, unsigned val) |
354 | { |
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
356 | } |
357 | |
358 | static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc, |
359 | int offset, int i) |
360 | { |
361 | return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2)); |
362 | } |
363 | |
364 | static inline void edma_shadow0_write(struct edma_cc *ecc, int offset, |
365 | unsigned val) |
366 | { |
367 | edma_write(ecc, EDMA_SHADOW0 + offset, val); |
368 | } |
369 | |
370 | static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset, |
371 | int i, unsigned val) |
372 | { |
373 | edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val); |
374 | } |
375 | |
376 | static inline void edma_param_modify(struct edma_cc *ecc, int offset, |
377 | int param_no, unsigned and, unsigned or) |
378 | { |
379 | edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or); |
380 | } |
381 | |
382 | static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no, |
383 | int priority) |
384 | { |
385 | int bit = queue_no * 4; |
386 | |
	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
388 | } |
389 | |
390 | static void edma_set_chmap(struct edma_chan *echan, int slot) |
391 | { |
392 | struct edma_cc *ecc = echan->ecc; |
393 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
394 | |
395 | if (ecc->chmap_exist) { |
396 | slot = EDMA_CHAN_SLOT(slot); |
		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
398 | } |
399 | } |
400 | |
401 | static void edma_setup_interrupt(struct edma_chan *echan, bool enable) |
402 | { |
403 | struct edma_cc *ecc = echan->ecc; |
404 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
405 | int idx = EDMA_REG_ARRAY_INDEX(channel); |
406 | int ch_bit = EDMA_CHANNEL_BIT(channel); |
407 | |
408 | if (enable) { |
		edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
		edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
	} else {
		edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
413 | } |
414 | } |
415 | |
416 | /* |
417 | * paRAM slot management functions |
418 | */ |
419 | static void edma_write_slot(struct edma_cc *ecc, unsigned slot, |
420 | const struct edmacc_param *param) |
421 | { |
422 | slot = EDMA_CHAN_SLOT(slot); |
423 | if (slot >= ecc->num_slots) |
424 | return; |
425 | memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE); |
426 | } |
427 | |
428 | static int edma_read_slot(struct edma_cc *ecc, unsigned slot, |
429 | struct edmacc_param *param) |
430 | { |
431 | slot = EDMA_CHAN_SLOT(slot); |
432 | if (slot >= ecc->num_slots) |
433 | return -EINVAL; |
434 | memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE); |
435 | |
436 | return 0; |
437 | } |
438 | |
439 | /** |
440 | * edma_alloc_slot - allocate DMA parameter RAM |
441 | * @ecc: pointer to edma_cc struct |
442 | * @slot: specific slot to allocate; negative for "any unused slot" |
443 | * |
444 | * This allocates a parameter RAM slot, initializing it to hold a |
445 | * dummy transfer. Slots allocated using this routine have not been |
446 | * mapped to a hardware DMA channel, and will normally be used by |
447 | * linking to them from a slot associated with a DMA channel. |
448 | * |
449 | * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific |
450 | * slots may be allocated on behalf of DSP firmware. |
451 | * |
452 | * Returns the number of the slot, else negative errno. |
453 | */ |
454 | static int edma_alloc_slot(struct edma_cc *ecc, int slot) |
455 | { |
456 | if (slot >= 0) { |
457 | slot = EDMA_CHAN_SLOT(slot); |
458 | /* Requesting entry paRAM slot for a HW triggered channel. */ |
459 | if (ecc->chmap_exist && slot < ecc->num_channels) |
460 | slot = EDMA_SLOT_ANY; |
461 | } |
462 | |
463 | if (slot < 0) { |
464 | if (ecc->chmap_exist) |
465 | slot = 0; |
466 | else |
467 | slot = ecc->num_channels; |
468 | for (;;) { |
			slot = find_next_zero_bit(ecc->slot_inuse,
						  ecc->num_slots,
						  slot);
472 | if (slot == ecc->num_slots) |
473 | return -ENOMEM; |
			if (!test_and_set_bit(slot, ecc->slot_inuse))
475 | break; |
476 | } |
477 | } else if (slot >= ecc->num_slots) { |
478 | return -EINVAL; |
	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {
480 | return -EBUSY; |
481 | } |
482 | |
	edma_write_slot(ecc, slot, &dummy_paramset);
484 | |
485 | return EDMA_CTLR_CHAN(ecc->id, slot); |
486 | } |
487 | |
488 | static void edma_free_slot(struct edma_cc *ecc, unsigned slot) |
489 | { |
490 | slot = EDMA_CHAN_SLOT(slot); |
491 | if (slot >= ecc->num_slots) |
492 | return; |
493 | |
	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->slot_inuse);
496 | } |
497 | |
498 | /** |
499 | * edma_link - link one parameter RAM slot to another |
500 | * @ecc: pointer to edma_cc struct |
501 | * @from: parameter RAM slot originating the link |
502 | * @to: parameter RAM slot which is the link target |
503 | * |
504 | * The originating slot should not be part of any active DMA transfer. |
505 | */ |
506 | static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to) |
507 | { |
508 | if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to))) |
509 | dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n" ); |
510 | |
511 | from = EDMA_CHAN_SLOT(from); |
512 | to = EDMA_CHAN_SLOT(to); |
513 | if (from >= ecc->num_slots || to >= ecc->num_slots) |
514 | return; |
515 | |
	edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
517 | PARM_OFFSET(to)); |
518 | } |
519 | |
520 | /** |
521 | * edma_get_position - returns the current transfer point |
522 | * @ecc: pointer to edma_cc struct |
523 | * @slot: parameter RAM slot being examined |
524 | * @dst: true selects the dest position, false the source |
525 | * |
526 | * Returns the position of the current active slot |
527 | */ |
528 | static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot, |
529 | bool dst) |
530 | { |
531 | u32 offs; |
532 | |
533 | slot = EDMA_CHAN_SLOT(slot); |
534 | offs = PARM_OFFSET(slot); |
535 | offs += dst ? PARM_DST : PARM_SRC; |
536 | |
	return edma_read(ecc, offs);
538 | } |
539 | |
540 | /* |
541 | * Channels with event associations will be triggered by their hardware |
542 | * events, and channels without such associations will be triggered by |
543 | * software. (At this writing there is no interface for using software |
544 | * triggers except with channels that don't support hardware triggers.) |
545 | */ |
546 | static void edma_start(struct edma_chan *echan) |
547 | { |
548 | struct edma_cc *ecc = echan->ecc; |
549 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
550 | int idx = EDMA_REG_ARRAY_INDEX(channel); |
551 | int ch_bit = EDMA_CHANNEL_BIT(channel); |
552 | |
553 | if (!echan->hw_triggered) { |
554 | /* EDMA channels without event association */ |
555 | dev_dbg(ecc->dev, "ESR%d %08x\n" , idx, |
556 | edma_shadow0_read_array(ecc, SH_ESR, idx)); |
557 | edma_shadow0_write_array(ecc, SH_ESR, i: idx, val: ch_bit); |
558 | } else { |
559 | /* EDMA channel with event association */ |
560 | dev_dbg(ecc->dev, "ER%d %08x\n" , idx, |
561 | edma_shadow0_read_array(ecc, SH_ER, idx)); |
562 | /* Clear any pending event or error */ |
563 | edma_write_array(ecc, EDMA_ECR, i: idx, val: ch_bit); |
564 | edma_write_array(ecc, EDMA_EMCR, i: idx, val: ch_bit); |
565 | /* Clear any SER */ |
566 | edma_shadow0_write_array(ecc, SH_SECR, i: idx, val: ch_bit); |
567 | edma_shadow0_write_array(ecc, SH_EESR, i: idx, val: ch_bit); |
568 | dev_dbg(ecc->dev, "EER%d %08x\n" , idx, |
569 | edma_shadow0_read_array(ecc, SH_EER, idx)); |
570 | } |
571 | } |
572 | |
573 | static void edma_stop(struct edma_chan *echan) |
574 | { |
575 | struct edma_cc *ecc = echan->ecc; |
576 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
577 | int idx = EDMA_REG_ARRAY_INDEX(channel); |
578 | int ch_bit = EDMA_CHANNEL_BIT(channel); |
579 | |
	edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);

	/* clear possibly pending completion interrupt */
	edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);

	dev_dbg(ecc->dev, "EER%d %08x\n", idx,
589 | edma_shadow0_read_array(ecc, SH_EER, idx)); |
590 | |
591 | /* REVISIT: consider guarding against inappropriate event |
592 | * chaining by overwriting with dummy_paramset. |
593 | */ |
594 | } |
595 | |
596 | /* |
597 | * Temporarily disable EDMA hardware events on the specified channel, |
598 | * preventing them from triggering new transfers |
599 | */ |
600 | static void edma_pause(struct edma_chan *echan) |
601 | { |
602 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
603 | |
	edma_shadow0_write_array(echan->ecc, SH_EECR,
605 | EDMA_REG_ARRAY_INDEX(channel), |
606 | EDMA_CHANNEL_BIT(channel)); |
607 | } |
608 | |
609 | /* Re-enable EDMA hardware events on the specified channel. */ |
610 | static void edma_resume(struct edma_chan *echan) |
611 | { |
612 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
613 | |
	edma_shadow0_write_array(echan->ecc, SH_EESR,
615 | EDMA_REG_ARRAY_INDEX(channel), |
616 | EDMA_CHANNEL_BIT(channel)); |
617 | } |
618 | |
619 | static void edma_trigger_channel(struct edma_chan *echan) |
620 | { |
621 | struct edma_cc *ecc = echan->ecc; |
622 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
623 | int idx = EDMA_REG_ARRAY_INDEX(channel); |
624 | int ch_bit = EDMA_CHANNEL_BIT(channel); |
625 | |
	edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);

	dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
629 | edma_shadow0_read_array(ecc, SH_ESR, idx)); |
630 | } |
631 | |
632 | static void edma_clean_channel(struct edma_chan *echan) |
633 | { |
634 | struct edma_cc *ecc = echan->ecc; |
635 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
636 | int idx = EDMA_REG_ARRAY_INDEX(channel); |
637 | int ch_bit = EDMA_CHANNEL_BIT(channel); |
638 | |
639 | dev_dbg(ecc->dev, "EMR%d %08x\n" , idx, |
640 | edma_read_array(ecc, EDMA_EMR, idx)); |
641 | edma_shadow0_write_array(ecc, SH_ECR, i: idx, val: ch_bit); |
642 | /* Clear the corresponding EMR bits */ |
643 | edma_write_array(ecc, EDMA_EMCR, i: idx, val: ch_bit); |
644 | /* Clear any SER */ |
645 | edma_shadow0_write_array(ecc, SH_SECR, i: idx, val: ch_bit); |
646 | edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0)); |
647 | } |
648 | |
649 | /* Move channel to a specific event queue */ |
650 | static void edma_assign_channel_eventq(struct edma_chan *echan, |
651 | enum dma_event_q eventq_no) |
652 | { |
653 | struct edma_cc *ecc = echan->ecc; |
654 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
655 | int bit = (channel & 0x7) * 4; |
656 | |
657 | /* default to low priority queue */ |
658 | if (eventq_no == EVENTQ_DEFAULT) |
659 | eventq_no = ecc->default_queue; |
660 | if (eventq_no >= ecc->num_tc) |
661 | return; |
662 | |
663 | eventq_no &= 7; |
	edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
			  eventq_no << bit);
666 | } |
667 | |
668 | static int edma_alloc_channel(struct edma_chan *echan, |
669 | enum dma_event_q eventq_no) |
670 | { |
671 | struct edma_cc *ecc = echan->ecc; |
672 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
673 | |
674 | if (!test_bit(echan->ch_num, ecc->channels_mask)) { |
675 | dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n" , |
676 | echan->ch_num); |
677 | return -EINVAL; |
678 | } |
679 | |
680 | /* ensure access through shadow region 0 */ |
	edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
682 | EDMA_CHANNEL_BIT(channel)); |
683 | |
684 | /* ensure no events are pending */ |
685 | edma_stop(echan); |
686 | |
	edma_setup_interrupt(echan, true);
688 | |
689 | edma_assign_channel_eventq(echan, eventq_no); |
690 | |
691 | return 0; |
692 | } |
693 | |
694 | static void edma_free_channel(struct edma_chan *echan) |
695 | { |
696 | /* ensure no events are pending */ |
697 | edma_stop(echan); |
698 | /* REVISIT should probably take out of shadow region 0 */ |
	edma_setup_interrupt(echan, false);
700 | } |
701 | |
702 | static inline struct edma_chan *to_edma_chan(struct dma_chan *c) |
703 | { |
704 | return container_of(c, struct edma_chan, vchan.chan); |
705 | } |
706 | |
707 | static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx) |
708 | { |
709 | return container_of(tx, struct edma_desc, vdesc.tx); |
710 | } |
711 | |
712 | static void edma_desc_free(struct virt_dma_desc *vdesc) |
713 | { |
714 | kfree(container_of(vdesc, struct edma_desc, vdesc)); |
715 | } |
716 | |
717 | /* Dispatch a queued descriptor to the controller (caller holds lock) */ |
718 | static void edma_execute(struct edma_chan *echan) |
719 | { |
720 | struct edma_cc *ecc = echan->ecc; |
721 | struct virt_dma_desc *vdesc; |
722 | struct edma_desc *edesc; |
723 | struct device *dev = echan->vchan.chan.device->dev; |
724 | int i, j, left, nslots; |
725 | |
726 | if (!echan->edesc) { |
727 | /* Setup is needed for the first transfer */ |
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
733 | } |
734 | |
735 | edesc = echan->edesc; |
736 | |
737 | /* Find out how many left */ |
738 | left = edesc->pset_nr - edesc->processed; |
739 | nslots = min(MAX_NR_SG, left); |
740 | edesc->sg_len = 0; |
741 | |
742 | /* Write descriptor PaRAM set(s) */ |
743 | for (i = 0; i < nslots; i++) { |
744 | j = i + edesc->processed; |
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
746 | edesc->sg_len += edesc->pset[j].len; |
747 | dev_vdbg(dev, |
748 | "\n pset[%d]:\n" |
749 | " chnum\t%d\n" |
750 | " slot\t%d\n" |
751 | " opt\t%08x\n" |
752 | " src\t%08x\n" |
753 | " dst\t%08x\n" |
754 | " abcnt\t%08x\n" |
755 | " ccnt\t%08x\n" |
756 | " bidx\t%08x\n" |
757 | " cidx\t%08x\n" |
758 | " lkrld\t%08x\n" , |
759 | j, echan->ch_num, echan->slot[i], |
760 | edesc->pset[j].param.opt, |
761 | edesc->pset[j].param.src, |
762 | edesc->pset[j].param.dst, |
763 | edesc->pset[j].param.a_b_cnt, |
764 | edesc->pset[j].param.ccnt, |
765 | edesc->pset[j].param.src_dst_bidx, |
766 | edesc->pset[j].param.src_dst_cidx, |
767 | edesc->pset[j].param.link_bcntrld); |
768 | /* Link to the previous slot if not the last set */ |
769 | if (i != (nslots - 1)) |
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
771 | } |
772 | |
773 | edesc->processed += nslots; |
774 | |
775 | /* |
776 | * If this is either the last set in a set of SG-list transactions |
777 | * then setup a link to the dummy slot, this results in all future |
778 | * events being absorbed and that's OK because we're done |
779 | */ |
780 | if (edesc->processed == edesc->pset_nr) { |
781 | if (edesc->cyclic) |
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
786 | } |
787 | |
788 | if (echan->missed) { |
789 | /* |
790 | * This happens due to setup times between intermediate |
791 | * transfers in long SG lists which have to be broken up into |
792 | * transfers of MAX_NR_SG |
793 | */ |
794 | dev_dbg(dev, "missed event on channel %d\n" , echan->ch_num); |
795 | edma_clean_channel(echan); |
796 | edma_stop(echan); |
797 | edma_start(echan); |
798 | edma_trigger_channel(echan); |
799 | echan->missed = 0; |
800 | } else if (edesc->processed <= MAX_NR_SG) { |
801 | dev_dbg(dev, "first transfer starting on channel %d\n" , |
802 | echan->ch_num); |
803 | edma_start(echan); |
804 | } else { |
805 | dev_dbg(dev, "chan: %d: completed %d elements, resuming\n" , |
806 | echan->ch_num, edesc->processed); |
807 | edma_resume(echan); |
808 | } |
809 | } |
810 | |
811 | static int edma_terminate_all(struct dma_chan *chan) |
812 | { |
	struct edma_chan *echan = to_edma_chan(chan);
814 | unsigned long flags; |
815 | LIST_HEAD(head); |
816 | |
817 | spin_lock_irqsave(&echan->vchan.lock, flags); |
818 | |
819 | /* |
820 | * Stop DMA activity: we assume the callback will not be called |
821 | * after edma_dma() returns (even if it does, it will see |
822 | * echan->edesc is NULL and exit.) |
823 | */ |
824 | if (echan->edesc) { |
825 | edma_stop(echan); |
826 | /* Move the cyclic channel back to default queue */ |
827 | if (!echan->tc && echan->edesc->cyclic) |
			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
829 | |
		vchan_terminate_vdesc(&echan->edesc->vdesc);
831 | echan->edesc = NULL; |
832 | } |
833 | |
	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);
837 | |
838 | return 0; |
839 | } |
840 | |
841 | static void edma_synchronize(struct dma_chan *chan) |
842 | { |
	struct edma_chan *echan = to_edma_chan(chan);

	vchan_synchronize(&echan->vchan);
846 | } |
847 | |
848 | static int edma_slave_config(struct dma_chan *chan, |
849 | struct dma_slave_config *cfg) |
850 | { |
	struct edma_chan *echan = to_edma_chan(chan);
852 | |
853 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || |
854 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) |
855 | return -EINVAL; |
856 | |
857 | if (cfg->src_maxburst > chan->device->max_burst || |
858 | cfg->dst_maxburst > chan->device->max_burst) |
859 | return -EINVAL; |
860 | |
861 | memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); |
862 | |
863 | return 0; |
864 | } |
865 | |
866 | static int edma_dma_pause(struct dma_chan *chan) |
867 | { |
	struct edma_chan *echan = to_edma_chan(chan);
869 | |
870 | if (!echan->edesc) |
871 | return -EINVAL; |
872 | |
873 | edma_pause(echan); |
874 | return 0; |
875 | } |
876 | |
877 | static int edma_dma_resume(struct dma_chan *chan) |
878 | { |
	struct edma_chan *echan = to_edma_chan(chan);
880 | |
881 | edma_resume(echan); |
882 | return 0; |
883 | } |
884 | |
885 | /* |
886 | * A PaRAM set configuration abstraction used by other modes |
887 | * @chan: Channel who's PaRAM set we're configuring |
888 | * @pset: PaRAM set to initialize and setup. |
889 | * @src_addr: Source address of the DMA |
890 | * @dst_addr: Destination address of the DMA |
891 | * @burst: In units of dev_width, how much to send |
892 | * @dev_width: How much is the dev_width |
893 | * @dma_length: Total length of the DMA transfer |
894 | * @direction: Direction of the transfer |
895 | */ |
896 | static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset, |
897 | dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, |
898 | unsigned int acnt, unsigned int dma_length, |
899 | enum dma_transfer_direction direction) |
900 | { |
	struct edma_chan *echan = to_edma_chan(chan);
902 | struct device *dev = chan->device->dev; |
903 | struct edmacc_param *param = &epset->param; |
904 | int bcnt, ccnt, cidx; |
905 | int src_bidx, dst_bidx, src_cidx, dst_cidx; |
906 | int absync; |
907 | |
908 | /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */ |
909 | if (!burst) |
910 | burst = 1; |
911 | /* |
912 | * If the maxburst is equal to the fifo width, use |
913 | * A-synced transfers. This allows for large contiguous |
914 | * buffer transfers using only one PaRAM set. |
915 | */ |
916 | if (burst == 1) { |
917 | /* |
918 | * For the A-sync case, bcnt and ccnt are the remainder |
919 | * and quotient respectively of the division of: |
920 | * (dma_length / acnt) by (SZ_64K -1). This is so |
921 | * that in case bcnt over flows, we have ccnt to use. |
922 | * Note: In A-sync transfer only, bcntrld is used, but it |
923 | * only applies for sg_dma_len(sg) >= SZ_64K. |
924 | * In this case, the best way adopted is- bccnt for the |
925 | * first frame will be the remainder below. Then for |
926 | * every successive frame, bcnt will be SZ_64K-1. This |
927 | * is assured as bcntrld = 0xffff in end of function. |
928 | */ |
929 | absync = false; |
930 | ccnt = dma_length / acnt / (SZ_64K - 1); |
931 | bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); |
932 | /* |
933 | * If bcnt is non-zero, we have a remainder and hence an |
934 | * extra frame to transfer, so increment ccnt. |
935 | */ |
936 | if (bcnt) |
937 | ccnt++; |
938 | else |
939 | bcnt = SZ_64K - 1; |
940 | cidx = acnt; |
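		/*
		 * Illustrative numbers: acnt = 4 and dma_length = 524288
		 * give dma_length / acnt = 131072 elements;
		 * 131072 / (SZ_64K - 1) = 2 remainder 2, so bcnt = 2 and
		 * ccnt is bumped to 3: two full frames of 65535 elements
		 * plus one short frame of 2 elements.
		 */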
941 | } else { |
942 | /* |
943 | * If maxburst is greater than the fifo address_width, |
944 | * use AB-synced transfers where A count is the fifo |
945 | * address_width and B count is the maxburst. In this |
946 | * case, we are limited to transfers of C count frames |
947 | * of (address_width * maxburst) where C count is limited |
948 | * to SZ_64K-1. This places an upper bound on the length |
949 | * of an SG segment that can be handled. |
950 | */ |
951 | absync = true; |
952 | bcnt = burst; |
953 | ccnt = dma_length / (acnt * bcnt); |
954 | if (ccnt > (SZ_64K - 1)) { |
955 | dev_err(dev, "Exceeded max SG segment size\n" ); |
956 | return -EINVAL; |
957 | } |
958 | cidx = acnt * bcnt; |
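		/*
		 * Illustrative numbers: a 4-byte FIFO (acnt = 4) with
		 * maxburst = 8 gives bcnt = 8 and cidx = 32, i.e.
		 * 32-byte frames, so ccnt = dma_length / 32 and the
		 * segment is capped at 32 * (SZ_64K - 1) bytes.
		 */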
959 | } |
960 | |
961 | epset->len = dma_length; |
962 | |
963 | if (direction == DMA_MEM_TO_DEV) { |
964 | src_bidx = acnt; |
965 | src_cidx = cidx; |
966 | dst_bidx = 0; |
967 | dst_cidx = 0; |
968 | epset->addr = src_addr; |
969 | } else if (direction == DMA_DEV_TO_MEM) { |
970 | src_bidx = 0; |
971 | src_cidx = 0; |
972 | dst_bidx = acnt; |
973 | dst_cidx = cidx; |
974 | epset->addr = dst_addr; |
975 | } else if (direction == DMA_MEM_TO_MEM) { |
976 | src_bidx = acnt; |
977 | src_cidx = cidx; |
978 | dst_bidx = acnt; |
979 | dst_cidx = cidx; |
980 | epset->addr = src_addr; |
981 | } else { |
982 | dev_err(dev, "%s: direction not implemented yet\n" , __func__); |
983 | return -EINVAL; |
984 | } |
985 | |
986 | param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); |
987 | /* Configure A or AB synchronized transfers */ |
988 | if (absync) |
989 | param->opt |= SYNCDIM; |
990 | |
991 | param->src = src_addr; |
992 | param->dst = dst_addr; |
993 | |
994 | param->src_dst_bidx = (dst_bidx << 16) | src_bidx; |
995 | param->src_dst_cidx = (dst_cidx << 16) | src_cidx; |
996 | |
997 | param->a_b_cnt = bcnt << 16 | acnt; |
998 | param->ccnt = ccnt; |
999 | /* |
1000 | * Only time when (bcntrld) auto reload is required is for |
1001 | * A-sync case, and in this case, a requirement of reload value |
1002 | * of SZ_64K-1 only is assured. 'link' is initially set to NULL |
1003 | * and then later will be populated by edma_execute. |
1004 | */ |
1005 | param->link_bcntrld = 0xffffffff; |
1006 | return absync; |
1007 | } |
1008 | |
1009 | static struct dma_async_tx_descriptor *edma_prep_slave_sg( |
1010 | struct dma_chan *chan, struct scatterlist *sgl, |
1011 | unsigned int sg_len, enum dma_transfer_direction direction, |
1012 | unsigned long tx_flags, void *context) |
1013 | { |
	struct edma_chan *echan = to_edma_chan(chan);
1015 | struct device *dev = chan->device->dev; |
1016 | struct edma_desc *edesc; |
1017 | dma_addr_t src_addr = 0, dst_addr = 0; |
1018 | enum dma_slave_buswidth dev_width; |
1019 | u32 burst; |
1020 | struct scatterlist *sg; |
1021 | int i, nslots, ret; |
1022 | |
1023 | if (unlikely(!echan || !sgl || !sg_len)) |
1024 | return NULL; |
1025 | |
1026 | if (direction == DMA_DEV_TO_MEM) { |
1027 | src_addr = echan->cfg.src_addr; |
1028 | dev_width = echan->cfg.src_addr_width; |
1029 | burst = echan->cfg.src_maxburst; |
1030 | } else if (direction == DMA_MEM_TO_DEV) { |
1031 | dst_addr = echan->cfg.dst_addr; |
1032 | dev_width = echan->cfg.dst_addr_width; |
1033 | burst = echan->cfg.dst_maxburst; |
1034 | } else { |
1035 | dev_err(dev, "%s: bad direction: %d\n" , __func__, direction); |
1036 | return NULL; |
1037 | } |
1038 | |
1039 | if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { |
1040 | dev_err(dev, "%s: Undefined slave buswidth\n" , __func__); |
1041 | return NULL; |
1042 | } |
1043 | |
1044 | edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC); |
1045 | if (!edesc) |
1046 | return NULL; |
1047 | |
1048 | edesc->pset_nr = sg_len; |
1049 | edesc->residue = 0; |
1050 | edesc->direction = direction; |
1051 | edesc->echan = echan; |
1052 | |
1053 | /* Allocate a PaRAM slot, if needed */ |
1054 | nslots = min_t(unsigned, MAX_NR_SG, sg_len); |
1055 | |
1056 | for (i = 0; i < nslots; i++) { |
1057 | if (echan->slot[i] < 0) { |
1058 | echan->slot[i] = |
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
1063 | __func__); |
1064 | return NULL; |
1065 | } |
1066 | } |
1067 | } |
1068 | |
1069 | /* Configure PaRAM sets for each SG */ |
1070 | for_each_sg(sgl, sg, sg_len, i) { |
1071 | /* Get address for each SG */ |
1072 | if (direction == DMA_DEV_TO_MEM) |
1073 | dst_addr = sg_dma_address(sg); |
1074 | else |
1075 | src_addr = sg_dma_address(sg); |
1076 | |
		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
1082 | return NULL; |
1083 | } |
1084 | |
1085 | edesc->absync = ret; |
1086 | edesc->residue += sg_dma_len(sg); |
1087 | |
1088 | if (i == sg_len - 1) |
1089 | /* Enable completion interrupt */ |
1090 | edesc->pset[i].param.opt |= TCINTEN; |
		else if (!((i + 1) % MAX_NR_SG))
			/*
			 * Enable early completion interrupt for the
			 * intermediate set. In this case the driver will be
			 * notified when the paRAM set is submitted to TC.
			 * This will allow more time to set up the next set
			 * of slots.
			 */
			edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
1099 | } |
1100 | edesc->residue_stat = edesc->residue; |
1101 | |
	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1103 | } |
1104 | |
1105 | static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( |
1106 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
1107 | size_t len, unsigned long tx_flags) |
1108 | { |
1109 | int ret, nslots; |
1110 | struct edma_desc *edesc; |
1111 | struct device *dev = chan->device->dev; |
	struct edma_chan *echan = to_edma_chan(chan);
1113 | unsigned int width, pset_len, array_size; |
1114 | |
1115 | if (unlikely(!echan || !len)) |
1116 | return NULL; |
1117 | |
1118 | /* Align the array size (acnt block) with the transfer properties */ |
1119 | switch (__ffs((src | dest | len))) { |
1120 | case 0: |
1121 | array_size = SZ_32K - 1; |
1122 | break; |
1123 | case 1: |
1124 | array_size = SZ_32K - 2; |
1125 | break; |
1126 | default: |
1127 | array_size = SZ_32K - 4; |
1128 | break; |
1129 | } |
1130 | |
1131 | if (len < SZ_64K) { |
1132 | /* |
1133 | * Transfer size less than 64K can be handled with one paRAM |
1134 | * slot and with one burst. |
1135 | * ACNT = length |
1136 | */ |
1137 | width = len; |
1138 | pset_len = len; |
1139 | nslots = 1; |
1140 | } else { |
1141 | /* |
1142 | * Transfer size bigger than 64K will be handled with maximum of |
1143 | * two paRAM slots. |
1144 | * slot1: (full_length / 32767) times 32767 bytes bursts. |
1145 | * ACNT = 32767, length1: (full_length / 32767) * 32767 |
1146 | * slot2: the remaining amount of data after slot1. |
1147 | * ACNT = full_length - length1, length2 = ACNT |
1148 | * |
1149 | * When the full_length is a multiple of 32767 one slot can be |
1150 | * used to complete the transfer. |
1151 | */ |
1152 | width = array_size; |
1153 | pset_len = rounddown(len, width); |
		/* One slot is enough for lengths that are a multiple of the array size */
1155 | if (unlikely(pset_len == len)) |
1156 | nslots = 1; |
1157 | else |
1158 | nslots = 2; |
1159 | } |
1160 | |
1161 | edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC); |
1162 | if (!edesc) |
1163 | return NULL; |
1164 | |
1165 | edesc->pset_nr = nslots; |
1166 | edesc->residue = edesc->residue_stat = len; |
1167 | edesc->direction = DMA_MEM_TO_MEM; |
1168 | edesc->echan = echan; |
1169 | |
1170 | ret = edma_config_pset(chan, epset: &edesc->pset[0], src_addr: src, dst_addr: dest, burst: 1, |
1171 | acnt: width, dma_length: pset_len, direction: DMA_MEM_TO_MEM); |
1172 | if (ret < 0) { |
1173 | kfree(objp: edesc); |
1174 | return NULL; |
1175 | } |
1176 | |
1177 | edesc->absync = ret; |
1178 | |
1179 | edesc->pset[0].param.opt |= ITCCHEN; |
1180 | if (nslots == 1) { |
1181 | /* Enable transfer complete interrupt if requested */ |
1182 | if (tx_flags & DMA_PREP_INTERRUPT) |
1183 | edesc->pset[0].param.opt |= TCINTEN; |
1184 | } else { |
1185 | /* Enable transfer complete chaining for the first slot */ |
1186 | edesc->pset[0].param.opt |= TCCHEN; |
1187 | |
1188 | if (echan->slot[1] < 0) { |
			echan->slot[1] = edma_alloc_slot(echan->ecc,
							 EDMA_SLOT_ANY);
			if (echan->slot[1] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
1194 | __func__); |
1195 | return NULL; |
1196 | } |
1197 | } |
1198 | dest += pset_len; |
1199 | src += pset_len; |
1200 | pset_len = width = len % array_size; |
1201 | |
1202 | ret = edma_config_pset(chan, epset: &edesc->pset[1], src_addr: src, dst_addr: dest, burst: 1, |
1203 | acnt: width, dma_length: pset_len, direction: DMA_MEM_TO_MEM); |
1204 | if (ret < 0) { |
1205 | kfree(objp: edesc); |
1206 | return NULL; |
1207 | } |
1208 | |
1209 | edesc->pset[1].param.opt |= ITCCHEN; |
1210 | /* Enable transfer complete interrupt if requested */ |
1211 | if (tx_flags & DMA_PREP_INTERRUPT) |
1212 | edesc->pset[1].param.opt |= TCINTEN; |
1213 | } |
1214 | |
1215 | if (!(tx_flags & DMA_PREP_INTERRUPT)) |
1216 | edesc->polled = true; |
1217 | |
	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1219 | } |
1220 | |
1221 | static struct dma_async_tx_descriptor * |
1222 | edma_prep_dma_interleaved(struct dma_chan *chan, |
1223 | struct dma_interleaved_template *xt, |
1224 | unsigned long tx_flags) |
1225 | { |
1226 | struct device *dev = chan->device->dev; |
	struct edma_chan *echan = to_edma_chan(chan);
1228 | struct edmacc_param *param; |
1229 | struct edma_desc *edesc; |
1230 | size_t src_icg, dst_icg; |
1231 | int src_bidx, dst_bidx; |
1232 | |
1233 | /* Slave mode is not supported */ |
	if (is_slave_direction(xt->dir))
1235 | return NULL; |
1236 | |
1237 | if (xt->frame_size != 1 || xt->numf == 0) |
1238 | return NULL; |
1239 | |
1240 | if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K) |
1241 | return NULL; |
1242 | |
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
1244 | if (src_icg) { |
1245 | src_bidx = src_icg + xt->sgl[0].size; |
1246 | } else if (xt->src_inc) { |
1247 | src_bidx = xt->sgl[0].size; |
1248 | } else { |
1249 | dev_err(dev, "%s: SRC constant addressing is not supported\n" , |
1250 | __func__); |
1251 | return NULL; |
1252 | } |
1253 | |
	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
1255 | if (dst_icg) { |
1256 | dst_bidx = dst_icg + xt->sgl[0].size; |
1257 | } else if (xt->dst_inc) { |
1258 | dst_bidx = xt->sgl[0].size; |
1259 | } else { |
1260 | dev_err(dev, "%s: DST constant addressing is not supported\n" , |
1261 | __func__); |
1262 | return NULL; |
1263 | } |
1264 | |
1265 | if (src_bidx > SZ_64K || dst_bidx > SZ_64K) |
1266 | return NULL; |
1267 | |
1268 | edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC); |
1269 | if (!edesc) |
1270 | return NULL; |
1271 | |
1272 | edesc->direction = DMA_MEM_TO_MEM; |
1273 | edesc->echan = echan; |
1274 | edesc->pset_nr = 1; |
1275 | |
1276 | param = &edesc->pset[0].param; |
1277 | |
1278 | param->src = xt->src_start; |
1279 | param->dst = xt->dst_start; |
1280 | param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size; |
1281 | param->ccnt = 1; |
1282 | param->src_dst_bidx = (dst_bidx << 16) | src_bidx; |
1283 | param->src_dst_cidx = 0; |
1284 | |
1285 | param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); |
1286 | param->opt |= ITCCHEN; |
1287 | /* Enable transfer complete interrupt if requested */ |
1288 | if (tx_flags & DMA_PREP_INTERRUPT) |
1289 | param->opt |= TCINTEN; |
1290 | else |
1291 | edesc->polled = true; |
1292 | |
	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1294 | } |
1295 | |
1296 | static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( |
1297 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
1298 | size_t period_len, enum dma_transfer_direction direction, |
1299 | unsigned long tx_flags) |
1300 | { |
	struct edma_chan *echan = to_edma_chan(chan);
1302 | struct device *dev = chan->device->dev; |
1303 | struct edma_desc *edesc; |
1304 | dma_addr_t src_addr, dst_addr; |
1305 | enum dma_slave_buswidth dev_width; |
1306 | bool use_intermediate = false; |
1307 | u32 burst; |
1308 | int i, ret, nslots; |
1309 | |
1310 | if (unlikely(!echan || !buf_len || !period_len)) |
1311 | return NULL; |
1312 | |
1313 | if (direction == DMA_DEV_TO_MEM) { |
1314 | src_addr = echan->cfg.src_addr; |
1315 | dst_addr = buf_addr; |
1316 | dev_width = echan->cfg.src_addr_width; |
1317 | burst = echan->cfg.src_maxburst; |
1318 | } else if (direction == DMA_MEM_TO_DEV) { |
1319 | src_addr = buf_addr; |
1320 | dst_addr = echan->cfg.dst_addr; |
1321 | dev_width = echan->cfg.dst_addr_width; |
1322 | burst = echan->cfg.dst_maxburst; |
1323 | } else { |
1324 | dev_err(dev, "%s: bad direction: %d\n" , __func__, direction); |
1325 | return NULL; |
1326 | } |
1327 | |
1328 | if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { |
1329 | dev_err(dev, "%s: Undefined slave buswidth\n" , __func__); |
1330 | return NULL; |
1331 | } |
1332 | |
1333 | if (unlikely(buf_len % period_len)) { |
1334 | dev_err(dev, "Period should be multiple of Buffer length\n" ); |
1335 | return NULL; |
1336 | } |
1337 | |
1338 | nslots = (buf_len / period_len) + 1; |
1339 | |
1340 | /* |
1341 | * Cyclic DMA users such as audio cannot tolerate delays introduced |
1342 | * by cases where the number of periods is more than the maximum |
1343 | * number of SGs the EDMA driver can handle at a time. For DMA types |
1344 | * such as Slave SGs, such delays are tolerable and synchronized, |
1345 | * but the synchronization is difficult to achieve with Cyclic and |
1346 | * cannot be guaranteed, so we error out early. |
1347 | */ |
1348 | if (nslots > MAX_NR_SG) { |
1349 | /* |
1350 | * If the burst and period sizes are the same, we can put |
1351 | * the full buffer into a single period and activate |
1352 | * intermediate interrupts. This will produce interrupts |
1353 | * after each burst, which is also after each desired period. |
1354 | */ |
1355 | if (burst == period_len) { |
1356 | period_len = buf_len; |
1357 | nslots = 2; |
1358 | use_intermediate = true; |
1359 | } else { |
1360 | return NULL; |
1361 | } |
1362 | } |
1363 | |
1364 | edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC); |
1365 | if (!edesc) |
1366 | return NULL; |
1367 | |
1368 | edesc->cyclic = 1; |
1369 | edesc->pset_nr = nslots; |
1370 | edesc->residue = edesc->residue_stat = buf_len; |
1371 | edesc->direction = direction; |
1372 | edesc->echan = echan; |
1373 | |
1374 | dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n" , |
1375 | __func__, echan->ch_num, nslots, period_len, buf_len); |
1376 | |
1377 | for (i = 0; i < nslots; i++) { |
1378 | /* Allocate a PaRAM slot, if needed */ |
1379 | if (echan->slot[i] < 0) { |
1380 | echan->slot[i] = |
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
1385 | __func__); |
1386 | return NULL; |
1387 | } |
1388 | } |
1389 | |
1390 | if (i == nslots - 1) { |
1391 | memcpy(&edesc->pset[i], &edesc->pset[0], |
1392 | sizeof(edesc->pset[0])); |
1393 | break; |
1394 | } |
1395 | |
1396 | ret = edma_config_pset(chan, epset: &edesc->pset[i], src_addr, |
1397 | dst_addr, burst, acnt: dev_width, dma_length: period_len, |
1398 | direction); |
1399 | if (ret < 0) { |
1400 | kfree(objp: edesc); |
1401 | return NULL; |
1402 | } |
1403 | |
1404 | if (direction == DMA_DEV_TO_MEM) |
1405 | dst_addr += period_len; |
1406 | else |
1407 | src_addr += period_len; |
1408 | |
1409 | dev_vdbg(dev, "%s: Configure period %d of buf:\n" , __func__, i); |
1410 | dev_vdbg(dev, |
1411 | "\n pset[%d]:\n" |
1412 | " chnum\t%d\n" |
1413 | " slot\t%d\n" |
1414 | " opt\t%08x\n" |
1415 | " src\t%08x\n" |
1416 | " dst\t%08x\n" |
1417 | " abcnt\t%08x\n" |
1418 | " ccnt\t%08x\n" |
1419 | " bidx\t%08x\n" |
1420 | " cidx\t%08x\n" |
1421 | " lkrld\t%08x\n" , |
1422 | i, echan->ch_num, echan->slot[i], |
1423 | edesc->pset[i].param.opt, |
1424 | edesc->pset[i].param.src, |
1425 | edesc->pset[i].param.dst, |
1426 | edesc->pset[i].param.a_b_cnt, |
1427 | edesc->pset[i].param.ccnt, |
1428 | edesc->pset[i].param.src_dst_bidx, |
1429 | edesc->pset[i].param.src_dst_cidx, |
1430 | edesc->pset[i].param.link_bcntrld); |
1431 | |
1432 | edesc->absync = ret; |
1433 | |
1434 | /* |
1435 | * Enable period interrupt only if it is requested |
1436 | */ |
1437 | if (tx_flags & DMA_PREP_INTERRUPT) { |
1438 | edesc->pset[i].param.opt |= TCINTEN; |
1439 | |
1440 | /* Also enable intermediate interrupts if necessary */ |
1441 | if (use_intermediate) |
1442 | edesc->pset[i].param.opt |= ITCINTEN; |
1443 | } |
1444 | } |
1445 | |
	/* Place the cyclic channel in the highest priority queue */
1447 | if (!echan->tc) |
		edma_assign_channel_eventq(echan, EVENTQ_0);
1449 | |
	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1451 | } |
1452 | |
1453 | static void edma_completion_handler(struct edma_chan *echan) |
1454 | { |
1455 | struct device *dev = echan->vchan.chan.device->dev; |
1456 | struct edma_desc *edesc; |
1457 | |
	spin_lock(&echan->vchan.lock);
1459 | edesc = echan->edesc; |
1460 | if (edesc) { |
1461 | if (edesc->cyclic) { |
			vchan_cyclic_callback(&edesc->vdesc);
			spin_unlock(&echan->vchan.lock);
1464 | return; |
1465 | } else if (edesc->processed == edesc->pset_nr) { |
1466 | edesc->residue = 0; |
1467 | edma_stop(echan); |
			vchan_cookie_complete(&edesc->vdesc);
1469 | echan->edesc = NULL; |
1470 | |
1471 | dev_dbg(dev, "Transfer completed on channel %d\n" , |
1472 | echan->ch_num); |
1473 | } else { |
1474 | dev_dbg(dev, "Sub transfer completed on channel %d\n" , |
1475 | echan->ch_num); |
1476 | |
1477 | edma_pause(echan); |
1478 | |
1479 | /* Update statistics for tx_status */ |
1480 | edesc->residue -= edesc->sg_len; |
1481 | edesc->residue_stat = edesc->residue; |
1482 | edesc->processed_stat = edesc->processed; |
1483 | } |
1484 | edma_execute(echan); |
1485 | } |
1486 | |
	spin_unlock(&echan->vchan.lock);
1488 | } |
1489 | |
1490 | /* eDMA interrupt handler */ |
1491 | static irqreturn_t dma_irq_handler(int irq, void *data) |
1492 | { |
1493 | struct edma_cc *ecc = data; |
1494 | int ctlr; |
1495 | u32 sh_ier; |
1496 | u32 sh_ipr; |
1497 | u32 bank; |
1498 | |
1499 | ctlr = ecc->id; |
1500 | if (ctlr < 0) |
1501 | return IRQ_NONE; |
1502 | |
1503 | dev_vdbg(ecc->dev, "dma_irq_handler\n" ); |
1504 | |
	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
1506 | if (!sh_ipr) { |
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
1508 | if (!sh_ipr) |
1509 | return IRQ_NONE; |
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
1511 | bank = 1; |
1512 | } else { |
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
1514 | bank = 0; |
1515 | } |
1516 | |
1517 | do { |
1518 | u32 slot; |
1519 | u32 channel; |
1520 | |
1521 | slot = __ffs(sh_ipr); |
1522 | sh_ipr &= ~(BIT(slot)); |
1523 | |
1524 | if (sh_ier & BIT(slot)) { |
1525 | channel = (bank << 5) | slot; |
1526 | /* Clear the corresponding IPR bits */ |
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);
1529 | } |
1530 | } while (sh_ipr); |
1531 | |
	edma_shadow0_write(ecc, SH_IEVAL, 1);
1533 | return IRQ_HANDLED; |
1534 | } |
1535 | |
1536 | static void edma_error_handler(struct edma_chan *echan) |
1537 | { |
1538 | struct edma_cc *ecc = echan->ecc; |
1539 | struct device *dev = echan->vchan.chan.device->dev; |
1540 | struct edmacc_param p; |
1541 | int err; |
1542 | |
1543 | if (!echan->edesc) |
1544 | return; |
1545 | |
	spin_lock(&echan->vchan.lock);
1547 | |
	err = edma_read_slot(ecc, echan->slot[0], &p);
1549 | |
1550 | /* |
1551 | * Issue later based on missed flag which will be sure |
1552 | * to happen as: |
1553 | * (1) we finished transmitting an intermediate slot and |
1554 | * edma_execute is coming up. |
1555 | * (2) or we finished current transfer and issue will |
1556 | * call edma_execute. |
1557 | * |
1558 | * Important note: issuing can be dangerous here and |
1559 | * lead to some nasty recursion when we are in a NULL |
1560 | * slot. So we avoid doing so and set the missed flag. |
1561 | */ |
1562 | if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) { |
1563 | dev_dbg(dev, "Error on null slot, setting miss\n" ); |
1564 | echan->missed = 1; |
1565 | } else { |
1566 | /* |
1567 | * The slot is already programmed but the event got |
1568 | * missed, so its safe to issue it here. |
1569 | */ |
1570 | dev_dbg(dev, "Missed event, TRIGGERING\n" ); |
1571 | edma_clean_channel(echan); |
1572 | edma_stop(echan); |
1573 | edma_start(echan); |
1574 | edma_trigger_channel(echan); |
1575 | } |
	spin_unlock(&echan->vchan.lock);
1577 | } |
1578 | |
1579 | static inline bool edma_error_pending(struct edma_cc *ecc) |
1580 | { |
	if (edma_read_array(ecc, EDMA_EMR, 0) ||
	    edma_read_array(ecc, EDMA_EMR, 1) ||
1583 | edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR)) |
1584 | return true; |
1585 | |
1586 | return false; |
1587 | } |
1588 | |
1589 | /* eDMA error interrupt handler */ |
1590 | static irqreturn_t dma_ccerr_handler(int irq, void *data) |
1591 | { |
1592 | struct edma_cc *ecc = data; |
1593 | int i, j; |
1594 | int ctlr; |
1595 | unsigned int cnt = 0; |
1596 | unsigned int val; |
1597 | |
1598 | ctlr = ecc->id; |
1599 | if (ctlr < 0) |
1600 | return IRQ_NONE; |
1601 | |
1602 | dev_vdbg(ecc->dev, "dma_ccerr_handler\n" ); |
1603 | |
1604 | if (!edma_error_pending(ecc)) { |
1605 | /* |
1606 | * The registers indicate no pending error event but the irq |
1607 | * handler has been called. |
1608 | * Ask eDMA to re-evaluate the error registers. |
1609 | */ |
1610 | dev_err(ecc->dev, "%s: Error interrupt without error event!\n" , |
1611 | __func__); |
		edma_write(ecc, EDMA_EEVAL, 1);
1613 | return IRQ_NONE; |
1614 | } |
1615 | |
1616 | while (1) { |
1617 | /* Event missed register(s) */ |
1618 | for (j = 0; j < 2; j++) { |
1619 | unsigned long emr; |
1620 | |
			val = edma_read_array(ecc, EDMA_EMR, j);
1622 | if (!val) |
1623 | continue; |
1624 | |
1625 | dev_dbg(ecc->dev, "EMR%d 0x%08x\n" , j, val); |
1626 | emr = val; |
1627 | for_each_set_bit(i, &emr, 32) { |
1628 | int k = (j << 5) + i; |
1629 | |
1630 | /* Clear the corresponding EMR bits */ |
				edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
				/* Clear any SER */
				edma_shadow0_write_array(ecc, SH_SECR, j,
							 BIT(i));
				edma_error_handler(&ecc->slave_chans[k]);
1636 | } |
1637 | } |
1638 | |
1639 | val = edma_read(ecc, EDMA_QEMR); |
1640 | if (val) { |
1641 | dev_dbg(ecc->dev, "QEMR 0x%02x\n" , val); |
1642 | /* Not reported, just clear the interrupt reason. */ |
1643 | edma_write(ecc, EDMA_QEMCR, val); |
1644 | edma_shadow0_write(ecc, SH_QSECR, val); |
1645 | } |
1646 | |
1647 | val = edma_read(ecc, EDMA_CCERR); |
1648 | if (val) { |
1649 | dev_warn(ecc->dev, "CCERR 0x%08x\n" , val); |
1650 | /* Not reported, just clear the interrupt reason. */ |
1651 | edma_write(ecc, EDMA_CCERRCLR, val); |
1652 | } |
1653 | |
1654 | if (!edma_error_pending(ecc)) |
1655 | break; |
1656 | cnt++; |
1657 | if (cnt > 10) |
1658 | break; |
1659 | } |
	edma_write(ecc, EDMA_EEVAL, 1);
1661 | return IRQ_HANDLED; |
1662 | } |
1663 | |
1664 | /* Alloc channel resources */ |
1665 | static int edma_alloc_chan_resources(struct dma_chan *chan) |
1666 | { |
	struct edma_chan *echan = to_edma_chan(chan);
1668 | struct edma_cc *ecc = echan->ecc; |
1669 | struct device *dev = ecc->dev; |
1670 | enum dma_event_q eventq_no = EVENTQ_DEFAULT; |
1671 | int ret; |
1672 | |
1673 | if (echan->tc) { |
1674 | eventq_no = echan->tc->id; |
1675 | } else if (ecc->tc_list) { |
1676 | /* memcpy channel */ |
1677 | echan->tc = &ecc->tc_list[ecc->info->default_queue]; |
1678 | eventq_no = echan->tc->id; |
1679 | } |
1680 | |
1681 | ret = edma_alloc_channel(echan, eventq_no); |
1682 | if (ret) |
1683 | return ret; |
1684 | |
1685 | echan->slot[0] = edma_alloc_slot(ecc, slot: echan->ch_num); |
1686 | if (echan->slot[0] < 0) { |
1687 | dev_err(dev, "Entry slot allocation failed for channel %u\n" , |
1688 | EDMA_CHAN_SLOT(echan->ch_num)); |
1689 | ret = echan->slot[0]; |
1690 | goto err_slot; |
1691 | } |
1692 | |
1693 | /* Set up channel -> slot mapping for the entry slot */ |
1694 | edma_set_chmap(echan, slot: echan->slot[0]); |
1695 | echan->alloced = true; |
1696 | |
1697 | dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n" , |
1698 | EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, |
1699 | echan->hw_triggered ? "HW" : "SW" ); |
1700 | |
1701 | return 0; |
1702 | |
1703 | err_slot: |
1704 | edma_free_channel(echan); |
1705 | return ret; |
1706 | } |
1707 | |
1708 | /* Free channel resources */ |
1709 | static void edma_free_chan_resources(struct dma_chan *chan) |
1710 | { |
1711 | struct edma_chan *echan = to_edma_chan(c: chan); |
1712 | struct device *dev = echan->ecc->dev; |
1713 | int i; |
1714 | |
1715 | /* Terminate transfers */ |
1716 | edma_stop(echan); |
1717 | |
1718 | vchan_free_chan_resources(vc: &echan->vchan); |
1719 | |
1720 | /* Free EDMA PaRAM slots */ |
1721 | for (i = 0; i < EDMA_MAX_SLOTS; i++) { |
1722 | if (echan->slot[i] >= 0) { |
1723 | edma_free_slot(ecc: echan->ecc, slot: echan->slot[i]); |
1724 | echan->slot[i] = -1; |
1725 | } |
1726 | } |
1727 | |
1728 | /* Set entry slot to the dummy slot */ |
1729 | edma_set_chmap(echan, slot: echan->ecc->dummy_slot); |
1730 | |
1731 | /* Free EDMA channel */ |
1732 | if (echan->alloced) { |
1733 | edma_free_channel(echan); |
1734 | echan->alloced = false; |
1735 | } |
1736 | |
1737 | echan->tc = NULL; |
1738 | echan->hw_triggered = false; |
1739 | |
1740 | dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n" , |
1741 | EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id); |
1742 | } |
1743 | |
1744 | /* Send pending descriptor to hardware */ |
1745 | static void edma_issue_pending(struct dma_chan *chan) |
1746 | { |
1747 | struct edma_chan *echan = to_edma_chan(c: chan); |
1748 | unsigned long flags; |
1749 | |
1750 | spin_lock_irqsave(&echan->vchan.lock, flags); |
1751 | if (vchan_issue_pending(vc: &echan->vchan) && !echan->edesc) |
1752 | edma_execute(echan); |
1753 | spin_unlock_irqrestore(lock: &echan->vchan.lock, flags); |
1754 | } |
1755 | |
1756 | /* |
1757 | * This limit exists to avoid a possible infinite loop when waiting for proof |
1758 | * that a particular transfer is completed. This limit can be hit if there |
1759 | * are large bursts to/from slow devices or the CPU is never able to catch |
1760 | * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART |
1761 | * RX-FIFO, as many as 55 loops have been seen. |
1762 | */ |
1763 | #define EDMA_MAX_TR_WAIT_LOOPS 1000 |
1764 | |
1765 | static u32 edma_residue(struct edma_desc *edesc) |
1766 | { |
1767 | bool dst = edesc->direction == DMA_DEV_TO_MEM; |
1768 | int loop_count = EDMA_MAX_TR_WAIT_LOOPS; |
1769 | struct edma_chan *echan = edesc->echan; |
1770 | struct edma_pset *pset = edesc->pset; |
1771 | dma_addr_t done, pos, pos_old; |
1772 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
1773 | int idx = EDMA_REG_ARRAY_INDEX(channel); |
1774 | int ch_bit = EDMA_CHANNEL_BIT(channel); |
1775 | int event_reg; |
1776 | int i; |
1777 | |
1778 | /* |
1779 | * We always read the dst/src position from the first RamPar |
1780 | * pset. That's the one which is active now. |
1781 | */ |
1782 | pos = edma_get_position(ecc: echan->ecc, slot: echan->slot[0], dst); |
1783 | |
1784 | /* |
1785 | * "pos" may represent a transfer request that is still being |
1786 | * processed by the EDMACC or EDMATC. We will busy wait until |
1787 | * any one of the situations occurs: |
1788 | * 1. while and event is pending for the channel |
1789 | * 2. a position updated |
1790 | * 3. we hit the loop limit |
1791 | */ |
1792 | if (is_slave_direction(direction: edesc->direction)) |
1793 | event_reg = SH_ER; |
1794 | else |
1795 | event_reg = SH_ESR; |
1796 | |
1797 | pos_old = pos; |
1798 | while (edma_shadow0_read_array(ecc: echan->ecc, offset: event_reg, i: idx) & ch_bit) { |
1799 | pos = edma_get_position(ecc: echan->ecc, slot: echan->slot[0], dst); |
1800 | if (pos != pos_old) |
1801 | break; |
1802 | |
1803 | if (!--loop_count) { |
1804 | dev_dbg_ratelimited(echan->vchan.chan.device->dev, |
1805 | "%s: timeout waiting for PaRAM update\n" , |
1806 | __func__); |
1807 | break; |
1808 | } |
1809 | |
1810 | cpu_relax(); |
1811 | } |
1812 | |
1813 | /* |
1814 | * Cyclic is simple. Just subtract pset[0].addr from pos. |
1815 | * |
1816 | * We never update edesc->residue in the cyclic case, so we |
1817 | * can tell the remaining room to the end of the circular |
1818 | * buffer. |
1819 | */ |
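	/*
	 * Worked example with illustrative numbers: if pset->addr is
	 * 0x1000, edesc->residue is 256 and the current position is
	 * 0x1040, then done = 0x40 and the reported residue is
	 * 256 - 64 = 192 bytes.
	 */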
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * If the position is 0, then EDMA loaded the closing dummy slot and
	 * the transfer is completed.
	 */
	if (!pos)
		return 0;
	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct dma_tx_state txstate_tmp;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_COMPLETE)
		return ret;

	/* Provide a dummy dma_tx_state for completion checking */
	if (!txstate)
		txstate = &txstate_tmp;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		txstate->residue = edma_residue(echan->edesc);
	} else {
		struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan,
							      cookie);

		if (vdesc)
			txstate->residue = to_edma_desc(&vdesc->tx)->residue;
		else
			txstate->residue = 0;
	}

	/*
	 * Mark the cookie completed if the residue is 0 for non-cyclic
	 * transfers.
	 */
	if (ret != DMA_COMPLETE && !txstate->residue &&
	    echan->edesc && echan->edesc->polled &&
	    echan->edesc->vdesc.tx.cookie == cookie) {
		edma_stop(echan);
		vchan_cookie_complete(&echan->edesc->vdesc);
		echan->edesc = NULL;
		edma_execute(echan);
		ret = DMA_COMPLETE;
	}

	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

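/* memcpy_channels is a -1 terminated list of channels allowed to do memcpy */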
static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
{
	if (!memcpy_channels)
		return false;
	while (*memcpy_channels != -1) {
		if (*memcpy_channels == ch_num)
			return true;
		memcpy_channels++;
	}
	return false;
}

#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
{
	struct dma_device *s_ddev = &ecc->dma_slave;
	struct dma_device *m_ddev = NULL;
	s32 *memcpy_channels = ecc->info->memcpy_channels;
	int i, j;

	dma_cap_zero(s_ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
	if (ecc->legacy_mode && !memcpy_channels) {
		dev_warn(ecc->dev,
			 "Legacy memcpy is enabled, things might not work\n");

		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
		dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
	}

	s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
	s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
	s_ddev->device_free_chan_resources = edma_free_chan_resources;
	s_ddev->device_issue_pending = edma_issue_pending;
	s_ddev->device_tx_status = edma_tx_status;
	s_ddev->device_config = edma_slave_config;
	s_ddev->device_pause = edma_dma_pause;
	s_ddev->device_resume = edma_dma_resume;
	s_ddev->device_terminate_all = edma_terminate_all;
	s_ddev->device_synchronize = edma_synchronize;

	s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
	s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */

	s_ddev->dev = ecc->dev;
	INIT_LIST_HEAD(&s_ddev->channels);

	if (memcpy_channels) {
		m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
		if (!m_ddev) {
			dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
			memcpy_channels = NULL;
			goto ch_setup;
		}
		ecc->dma_memcpy = m_ddev;

		dma_cap_zero(m_ddev->cap_mask);
		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
		dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);

		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
		m_ddev->device_free_chan_resources = edma_free_chan_resources;
		m_ddev->device_issue_pending = edma_issue_pending;
		m_ddev->device_tx_status = edma_tx_status;
		m_ddev->device_config = edma_slave_config;
		m_ddev->device_pause = edma_dma_pause;
		m_ddev->device_resume = edma_dma_resume;
		m_ddev->device_terminate_all = edma_terminate_all;
		m_ddev->device_synchronize = edma_synchronize;

		m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->directions = BIT(DMA_MEM_TO_MEM);
		m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

		m_ddev->dev = ecc->dev;
		INIT_LIST_HEAD(&m_ddev->channels);
	} else if (!ecc->legacy_mode) {
		dev_info(ecc->dev, "memcpy is disabled\n");
	}

ch_setup:
	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &ecc->slave_chans[i];

		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
			vchan_init(&echan->vchan, m_ddev);
		else
			vchan_init(&echan->vchan, s_ddev);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

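	/*
	 * Most CCCFG fields encode a count as a power of two, e.g. a
	 * NUM_DMACH field of 2 means BIT(2 + 1) = 8 DMA channels and a
	 * NUM_PAENTRY field of 3 means BIT(3 + 4) = 128 PaRAM slots.
	 */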
	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_QDMACH(cccfg);
	ecc->num_qchannels = value * 2;

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");

	/* Nothing needs to be done if a queue priority mapping is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
					  sizeof(*queue_priority_map),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}

#if IS_ENABLED(CONFIG_OF)
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
			       size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(dev->of_node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(dev->of_node, pname,
					 (u16 *)xbar_chans, nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

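	/*
	 * Each 32-bit crossbar register packs four 8-bit mux fields: the low
	 * two bits of the event number select the byte lane within the
	 * register, the remaining bits the register offset.
	 */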
	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
	return 0;
}

static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	struct edma_soc_info *info;
	struct property *prop;
	int sz, ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	if (legacy_mode) {
		prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
					&sz);
		if (prop) {
			ret = edma_xbar_event_map(dev, info, sz);
			if (ret)
				return ERR_PTR(ret);
		}
		return info;
	}

	/* Get the list of channels allocated to be used for memcpy */
	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
	if (prop) {
		const char pname[] = "ti,edma-memcpy-channels";
		size_t nelm = sz / sizeof(s32);
		s32 *memcpy_ch;

		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
					 GFP_KERNEL);
		if (!memcpy_ch)
			return ERR_PTR(-ENOMEM);

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)memcpy_ch, nelm);
		if (ret)
			return ERR_PTR(ret);

		memcpy_ch[nelm] = -1;
		info->memcpy_channels = memcpy_ch;
	}

	prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
				&sz);
	if (prop) {
		const char pname[] = "ti,edma-reserved-slot-ranges";
		u32 (*tmp)[2];
		s16 (*rsv_slots)[2];
		size_t nelm = sz / sizeof(*tmp);
		struct edma_rsv_info *rsv_info;
		int i;

		if (!nelm)
			return info;

		tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return ERR_PTR(-ENOMEM);

		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
		if (!rsv_info) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
					 GFP_KERNEL);
		if (!rsv_slots) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)tmp, nelm * 2);
		if (ret) {
			kfree(tmp);
			return ERR_PTR(ret);
		}

		for (i = 0; i < nelm; i++) {
			rsv_slots[i][0] = tmp[i][0];
			rsv_slots[i][1] = tmp[i][1];
		}
		rsv_slots[nelm][0] = -1;
		rsv_slots[nelm][1] = -1;

		info->rsv = rsv_info;
		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;

		kfree(tmp);
	}

	return info;
}

static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct edma_cc *ecc = ofdma->of_dma_data;
	struct dma_chan *chan = NULL;
	struct edma_chan *echan;
	int i;

	if (!ecc || dma_spec->args_count < 1)
		return NULL;

	for (i = 0; i < ecc->num_channels; i++) {
		echan = &ecc->slave_chans[i];
		if (echan->ch_num == dma_spec->args[0]) {
			chan = &echan->vchan.chan;
			break;
		}
	}

	if (!chan)
		return NULL;

	if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
		goto out;

	if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
	    dma_spec->args[1] < echan->ecc->num_tc) {
		echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
		goto out;
	}

	return NULL;
out:
	/* The channel is going to be used as HW synchronized */
	echan->hw_triggered = true;
	return dma_get_slave_channel(chan);
}
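
/*
 * Illustrative consumer node for the xlate above (the node and channel
 * numbers are made up, not taken from a real board file): the first cell
 * selects the eDMA channel, the optional second cell the TC to use.
 *
 *	&uart0 {
 *		dmas = <&edma 26 0>, <&edma 27 0>;
 *		dma-names = "tx", "rx";
 *	};
 */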
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	return ERR_PTR(-EINVAL);
}

static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	return NULL;
}
#endif

static bool edma_filter_fn(struct dma_chan *chan, void *param);

static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info *info = pdev->dev.platform_data;
	s8 (*queue_priority_mapping)[2];
	const s16 (*reserved)[2];
	int i, irq;
	char *irq_name;
	struct resource *mem;
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc;
	bool legacy_mode = true;
	int ret;

	if (node) {
		const struct of_device_id *match;

		match = of_match_node(edma_of_ids, node);
		if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
			legacy_mode = false;

		info = edma_setup_info_from_dt(dev, legacy_mode);
		if (IS_ERR(info)) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);
		}
	}

	if (!info)
		return -ENODEV;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc)
		return -ENOMEM;

	ecc->dev = dev;
	ecc->id = pdev->id;
	ecc->legacy_mode = legacy_mode;
	/* When booting with DT the pdev->id is -1 */
	if (ecc->id < 0)
		ecc->id = 0;

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
	if (!mem) {
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem) {
			dev_err(dev, "no mem resource?\n");
			return -ENODEV;
		}
	}
	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		pm_runtime_disable(dev);
		return ret;
	}

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);
	if (ret)
		goto err_disable_pm;

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);

	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);

	ecc->channels_mask = devm_kcalloc(dev,
					  BITS_TO_LONGS(ecc->num_channels),
					  sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
		ret = -ENOMEM;
		goto err_disable_pm;
	}

	/* Mark all channels available initially */
	bitmap_fill(ecc->channels_mask, ecc->num_channels);

	ecc->default_queue = info->default_queue;

	if (info->rsv) {
		/* Set the reserved slots in inuse list */
		reserved = info->rsv->rsv_slots;
		if (reserved) {
			for (i = 0; reserved[i][0] != -1; i++)
				bitmap_set(ecc->slot_inuse, reserved[i][0],
					   reserved[i][1]);
		}

		/* Clear channels not usable for Linux */
		reserved = info->rsv->rsv_chans;
		if (reserved) {
			for (i = 0; reserved[i][0] != -1; i++)
				bitmap_clear(ecc->channels_mask, reserved[i][0],
					     reserved[i][1]);
		}
	}

	for (i = 0; i < ecc->num_slots; i++) {
		/* Reset only unused - not reserved - paRAM slots */
		if (!test_bit(i, ecc->slot_inuse))
			edma_write_slot(ecc, i, &dummy_paramset);
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

	if (irq > 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
			goto err_disable_pm;
		}
		ecc->ccint = irq;
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

	if (irq > 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0,
				       irq_name, ecc);
		if (ret) {
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
			goto err_disable_pm;
		}
		ecc->ccerrint = irq;
	}

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
		ret = ecc->dummy_slot;
		goto err_disable_pm;
	}

	queue_priority_mapping = info->queue_priority_mapping;

	if (!ecc->legacy_mode) {
		int lowest_priority = 0;
		unsigned int array_max;
		struct of_phandle_args tc_args;

		ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
					    sizeof(*ecc->tc_list), GFP_KERNEL);
		if (!ecc->tc_list) {
			ret = -ENOMEM;
			goto err_reg1;
		}

		for (i = 0;; i++) {
			ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
							       1, i, &tc_args);
			if (ret || i == ecc->num_tc)
				break;

			ecc->tc_list[i].node = tc_args.np;
			ecc->tc_list[i].id = i;
			queue_priority_mapping[i][1] = tc_args.args[0];
			if (queue_priority_mapping[i][1] > lowest_priority) {
				lowest_priority = queue_priority_mapping[i][1];
				info->default_queue = i;
			}
		}

		/* See if we have an optional dma-channel-mask array */
		array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32));
		ret = of_property_read_variable_u32_array(node,
							  "dma-channel-mask",
							  (u32 *)ecc->channels_mask,
							  1, array_max);
		if (ret > 0 && ret != array_max)
			dev_warn(dev, "dma-channel-mask is not complete.\n");
		else if (ret == -EOVERFLOW || ret == -ENODATA)
			dev_warn(dev,
				 "dma-channel-mask is out of range or empty\n");
	}

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

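	/*
	 * Disable all region access enables here; the DRAE bits for shadow
	 * region 0 are set per channel when it gets allocated.
	 */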
	edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0);
	edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0);
	edma_write_array(ecc, EDMA_QRAE, 0, 0x0);

	ecc->info = info;

	/* Init the dma device and channels */
	edma_dma_init(ecc, legacy_mode);

	for (i = 0; i < ecc->num_channels; i++) {
		/* Do not touch reserved channels */
		if (!test_bit(i, ecc->channels_mask))
			continue;

		/* Assign all channels to the default queue */
		edma_assign_channel_eventq(&ecc->slave_chans[i],
					   info->default_queue);
		/* Set entry slot to the dummy slot */
		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
	}

	ecc->dma_slave.filter.map = info->slave_map;
	ecc->dma_slave.filter.mapcnt = info->slavecnt;
	ecc->dma_slave.filter.fn = edma_filter_fn;

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret) {
		dev_err(dev, "slave ddev registration failed (%d)\n", ret);
		goto err_reg1;
	}

	if (ecc->dma_memcpy) {
		ret = dma_async_device_register(ecc->dma_memcpy);
		if (ret) {
			dev_err(dev, "memcpy ddev registration failed (%d)\n",
				ret);
			dma_async_device_unregister(&ecc->dma_slave);
			goto err_reg1;
		}
	}

	if (node)
		of_dma_controller_register(node, of_edma_xlate, ecc);

	dev_info(dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc, ecc->dummy_slot);
err_disable_pm:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}

static void edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct edma_chan *echan, *_echan;

	list_for_each_entry_safe(echan, _echan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&echan->vchan.chan.device_node);
		tasklet_kill(&echan->vchan.task);
	}
}

static void edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	devm_free_irq(dev, ecc->ccint, ecc);
	devm_free_irq(dev, ecc->ccerrint, ecc);

	edma_cleanup_vchan(&ecc->dma_slave);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	if (ecc->dma_memcpy)
		dma_async_device_unregister(ecc->dma_memcpy);
	edma_free_slot(ecc, ecc->dummy_slot);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
}

#ifdef CONFIG_PM_SLEEP
static int edma_pm_suspend(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced)
			edma_setup_interrupt(&echan[i], false);
	}

	return 0;
}

static int edma_pm_resume(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;
	s8 (*queue_priority_mapping)[2];

	/* Re-initialize the dummy slot to the dummy PaRAM set */
	edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);

	queue_priority_mapping = ecc->info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced) {
			/* Ensure access through shadow region 0 */
			edma_or_array2(ecc, EDMA_DRAE, 0,
				       EDMA_REG_ARRAY_INDEX(i),
				       EDMA_CHANNEL_BIT(i));

			edma_setup_interrupt(&echan[i], true);

			/* Set up channel -> slot mapping for the entry slot */
			edma_set_chmap(&echan[i], echan[i].slot[0]);
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
};

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove_new	= edma_remove,
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
};

static int edma_tptc_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	return pm_runtime_get_sync(&pdev->dev);
}

static struct platform_driver edma_tptc_driver = {
	.probe		= edma_tptc_probe,
	.driver = {
		.name	= "edma3-tptc",
		.of_match_table = edma_tptc_of_ids,
	},
};

static bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	bool match = false;

	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned int ch_req = *(unsigned int *)param;

		if (ch_req == echan->ch_num) {
			/* The channel is going to be used as HW synchronized */
			echan->hw_triggered = true;
			match = true;
		}
	}
	return match;
}
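
/*
 * Sketch of how legacy platform code might request a channel through the
 * filter above (illustrative only; the channel number is made up):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int ch_num = 12;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 */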

static int edma_init(void)
{
	int ret;

	ret = platform_driver_register(&edma_tptc_driver);
	if (ret)
		return ret;

	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
	platform_driver_unregister(&edma_tptc_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");