1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. |
4 | * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> |
5 | */ |
6 | |
7 | #include <linux/bitmap.h> |
8 | #include <linux/bitops.h> |
9 | #include <linux/clk.h> |
10 | #include <linux/dma-mapping.h> |
11 | #include <linux/dmaengine.h> |
12 | #include <linux/err.h> |
13 | #include <linux/interrupt.h> |
14 | #include <linux/io.h> |
15 | #include <linux/log2.h> |
16 | #include <linux/module.h> |
17 | #include <linux/of.h> |
18 | #include <linux/of_dma.h> |
19 | #include <linux/platform_device.h> |
20 | #include <linux/slab.h> |
21 | |
22 | #include <dt-bindings/dma/nbpfaxi.h> |
23 | |
24 | #include "dmaengine.h" |
25 | |
26 | #define NBPF_REG_CHAN_OFFSET 0 |
27 | #define NBPF_REG_CHAN_SIZE 0x40 |
28 | |
29 | /* Channel Current Transaction Byte register */ |
30 | #define NBPF_CHAN_CUR_TR_BYTE 0x20 |
31 | |
32 | /* Channel Status register */ |
33 | #define NBPF_CHAN_STAT 0x24 |
34 | #define NBPF_CHAN_STAT_EN 1 |
35 | #define NBPF_CHAN_STAT_TACT 4 |
36 | #define NBPF_CHAN_STAT_ERR 0x10 |
37 | #define NBPF_CHAN_STAT_END 0x20 |
38 | #define NBPF_CHAN_STAT_TC 0x40 |
39 | #define NBPF_CHAN_STAT_DER 0x400 |
40 | |
41 | /* Channel Control register */ |
42 | #define NBPF_CHAN_CTRL 0x28 |
43 | #define NBPF_CHAN_CTRL_SETEN 1 |
44 | #define NBPF_CHAN_CTRL_CLREN 2 |
45 | #define NBPF_CHAN_CTRL_STG 4 |
46 | #define NBPF_CHAN_CTRL_SWRST 8 |
47 | #define NBPF_CHAN_CTRL_CLRRQ 0x10 |
48 | #define NBPF_CHAN_CTRL_CLREND 0x20 |
49 | #define NBPF_CHAN_CTRL_CLRTC 0x40 |
50 | #define NBPF_CHAN_CTRL_SETSUS 0x100 |
51 | #define NBPF_CHAN_CTRL_CLRSUS 0x200 |
52 | |
53 | /* Channel Configuration register */ |
54 | #define NBPF_CHAN_CFG 0x2c |
55 | #define NBPF_CHAN_CFG_SEL 7 /* terminal SELect: 0..7 */ |
56 | #define NBPF_CHAN_CFG_REQD 8 /* REQuest Direction: DMAREQ is 0: input, 1: output */ |
57 | #define NBPF_CHAN_CFG_LOEN 0x10 /* LOw ENable: low DMA request line is: 0: inactive, 1: active */ |
58 | #define NBPF_CHAN_CFG_HIEN 0x20 /* HIgh ENable: high DMA request line is: 0: inactive, 1: active */ |
59 | #define NBPF_CHAN_CFG_LVL 0x40 /* LeVeL: DMA request line is sensed as 0: edge, 1: level */ |
60 | #define NBPF_CHAN_CFG_AM 0x700 /* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */ |
61 | #define NBPF_CHAN_CFG_SDS 0xf000 /* Source Data Size: 0: 8 bits,... , 7: 1024 bits */ |
62 | #define NBPF_CHAN_CFG_DDS 0xf0000 /* Destination Data Size: as above */ |
63 | #define NBPF_CHAN_CFG_SAD 0x100000 /* Source ADdress counting: 0: increment, 1: fixed */ |
64 | #define NBPF_CHAN_CFG_DAD 0x200000 /* Destination ADdress counting: 0: increment, 1: fixed */ |
65 | #define NBPF_CHAN_CFG_TM 0x400000 /* Transfer Mode: 0: single, 1: block TM */ |
66 | #define NBPF_CHAN_CFG_DEM 0x1000000 /* DMAEND interrupt Mask */ |
67 | #define NBPF_CHAN_CFG_TCM 0x2000000 /* DMATCO interrupt Mask */ |
68 | #define NBPF_CHAN_CFG_SBE 0x8000000 /* Sweep Buffer Enable */ |
69 | #define NBPF_CHAN_CFG_RSEL 0x10000000 /* RM: Register Set sELect */ |
70 | #define NBPF_CHAN_CFG_RSW 0x20000000 /* RM: Register Select sWitch */ |
71 | #define NBPF_CHAN_CFG_REN 0x40000000 /* RM: Register Set Enable */ |
72 | #define NBPF_CHAN_CFG_DMS 0x80000000 /* 0: register mode (RM), 1: link mode (LM) */ |
73 | |
74 | #define NBPF_CHAN_NXLA 0x38 |
75 | #define NBPF_CHAN_CRLA 0x3c |
76 | |
77 | /* Link Header field */ |
#define NBPF_HEADER_LV	1
#define NBPF_HEADER_LE	2
#define NBPF_HEADER_WBD	4
#define NBPF_HEADER_DIM	8
82 | |
83 | #define NBPF_CTRL 0x300 |
84 | #define NBPF_CTRL_PR 1 /* 0: fixed priority, 1: round robin */ |
85 | #define NBPF_CTRL_LVINT 2 /* DMAEND and DMAERR signalling: 0: pulse, 1: level */ |
86 | |
87 | #define NBPF_DSTAT_ER 0x314 |
88 | #define NBPF_DSTAT_END 0x318 |
89 | |
90 | #define NBPF_DMA_BUSWIDTHS \ |
91 | (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ |
92 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ |
93 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ |
94 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ |
95 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) |
96 | |
97 | struct nbpf_config { |
98 | int num_channels; |
99 | int buffer_size; |
100 | }; |
101 | |
102 | /* |
103 | * We've got 3 types of objects, used to describe DMA transfers: |
104 | * 1. high-level descriptor, containing a struct dma_async_tx_descriptor object |
105 | * in it, used to communicate with the user |
106 | * 2. hardware DMA link descriptors, that we pass to DMAC for DMA transfer |
107 | * queuing, these must be DMAable, using either the streaming DMA API or |
108 | * allocated from coherent memory - one per SG segment |
109 | * 3. one per SG segment descriptors, used to manage HW link descriptors from |
110 | * (2). They do not have to be DMAable. They can either be (a) allocated |
111 | * together with link descriptors as mixed (DMA / CPU) objects, or (b) |
112 | * separately. Even if allocated separately it would be best to link them |
113 | * to link descriptors once during channel resource allocation and always |
114 | * use them as a single object. |
115 | * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be |
116 | * treated as a single SG segment descriptor. |
117 | */ |
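/*
 * In this driver the three object types map onto struct nbpf_desc (1),
 * struct nbpf_link_reg (2) and struct nbpf_link_desc (3) respectively.
 */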
118 | |
119 | struct nbpf_link_reg { |
	u32	header;
121 | u32 src_addr; |
122 | u32 dst_addr; |
123 | u32 transaction_size; |
124 | u32 config; |
125 | u32 interval; |
126 | u32 extension; |
127 | u32 next; |
128 | } __packed; |
129 | |
130 | struct nbpf_device; |
131 | struct nbpf_channel; |
132 | struct nbpf_desc; |
133 | |
134 | struct nbpf_link_desc { |
135 | struct nbpf_link_reg *hwdesc; |
136 | dma_addr_t hwdesc_dma_addr; |
137 | struct nbpf_desc *desc; |
138 | struct list_head node; |
139 | }; |
140 | |
141 | /** |
142 | * struct nbpf_desc - DMA transfer descriptor |
143 | * @async_tx: dmaengine object |
144 | * @user_wait: waiting for a user ack |
145 | * @length: total transfer length |
146 | * @chan: associated DMAC channel |
147 | * @sg: list of hardware descriptors, represented by struct nbpf_link_desc |
148 | * @node: member in channel descriptor lists |
149 | */ |
150 | struct nbpf_desc { |
151 | struct dma_async_tx_descriptor async_tx; |
152 | bool user_wait; |
153 | size_t length; |
154 | struct nbpf_channel *chan; |
155 | struct list_head sg; |
156 | struct list_head node; |
157 | }; |
158 | |
159 | /* Take a wild guess: allocate 4 segments per descriptor */ |
160 | #define NBPF_SEGMENTS_PER_DESC 4 |
161 | #define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) / \ |
162 | (sizeof(struct nbpf_desc) + \ |
163 | NBPF_SEGMENTS_PER_DESC * \ |
164 | (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg)))) |
165 | #define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE) |
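/*
 * These limits size one struct nbpf_desc_page (below) to fit within a single
 * page; the BUILD_BUG_ON() in nbpf_probe() checks this at compile time.
 */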
166 | |
167 | struct nbpf_desc_page { |
168 | struct list_head node; |
169 | struct nbpf_desc desc[NBPF_DESCS_PER_PAGE]; |
170 | struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE]; |
171 | struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE]; |
172 | }; |
173 | |
174 | /** |
175 | * struct nbpf_channel - one DMAC channel |
176 | * @dma_chan: standard dmaengine channel object |
177 | * @tasklet: channel specific tasklet used for callbacks |
178 | * @base: register address base |
179 | * @nbpf: DMAC |
180 | * @name: IRQ name |
181 | * @irq: IRQ number |
182 | * @slave_src_addr: source address for slave DMA |
183 | * @slave_src_width: source slave data size in bytes |
184 | * @slave_src_burst: maximum source slave burst size in bytes |
185 | * @slave_dst_addr: destination address for slave DMA |
186 | * @slave_dst_width: destination slave data size in bytes |
187 | * @slave_dst_burst: maximum destination slave burst size in bytes |
188 | * @terminal: DMA terminal, assigned to this channel |
189 | * @dmarq_cfg: DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG |
190 | * @flags: configuration flags from DT |
191 | * @lock: protect descriptor lists |
192 | * @free_links: list of free link descriptors |
193 | * @free: list of free descriptors |
194 | * @queued: list of queued descriptors |
195 | * @active: list of descriptors, scheduled for processing |
196 | * @done: list of completed descriptors, waiting post-processing |
197 | * @desc_page: list of additionally allocated descriptor pages - if any |
198 | * @running: linked descriptor of running transaction |
 * @paused: are transactions on this channel paused?
200 | */ |
201 | struct nbpf_channel { |
202 | struct dma_chan dma_chan; |
203 | struct tasklet_struct tasklet; |
204 | void __iomem *base; |
205 | struct nbpf_device *nbpf; |
206 | char name[16]; |
207 | int irq; |
208 | dma_addr_t slave_src_addr; |
209 | size_t slave_src_width; |
210 | size_t slave_src_burst; |
211 | dma_addr_t slave_dst_addr; |
212 | size_t slave_dst_width; |
213 | size_t slave_dst_burst; |
214 | unsigned int terminal; |
215 | u32 dmarq_cfg; |
216 | unsigned long flags; |
217 | spinlock_t lock; |
218 | struct list_head free_links; |
219 | struct list_head free; |
220 | struct list_head queued; |
221 | struct list_head active; |
222 | struct list_head done; |
223 | struct list_head desc_page; |
224 | struct nbpf_desc *running; |
225 | bool paused; |
226 | }; |
227 | |
228 | struct nbpf_device { |
229 | struct dma_device dma_dev; |
230 | void __iomem *base; |
231 | u32 max_burst_mem_read; |
232 | u32 max_burst_mem_write; |
233 | struct clk *clk; |
234 | const struct nbpf_config *config; |
235 | unsigned int eirq; |
236 | struct nbpf_channel chan[]; |
237 | }; |
238 | |
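/*
 * Supported controller variants. The names follow the compatible strings:
 * NBPF<channels>B<buffer size>, e.g. NBPF8B16 is the 8-channel variant with a
 * 16-byte buffer.
 */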
239 | enum nbpf_model { |
240 | NBPF1B4, |
241 | NBPF1B8, |
242 | NBPF1B16, |
243 | NBPF4B4, |
244 | NBPF4B8, |
245 | NBPF4B16, |
246 | NBPF8B4, |
247 | NBPF8B8, |
248 | NBPF8B16, |
249 | }; |
250 | |
251 | static struct nbpf_config nbpf_cfg[] = { |
252 | [NBPF1B4] = { |
253 | .num_channels = 1, |
254 | .buffer_size = 4, |
255 | }, |
256 | [NBPF1B8] = { |
257 | .num_channels = 1, |
258 | .buffer_size = 8, |
259 | }, |
260 | [NBPF1B16] = { |
261 | .num_channels = 1, |
262 | .buffer_size = 16, |
263 | }, |
264 | [NBPF4B4] = { |
265 | .num_channels = 4, |
266 | .buffer_size = 4, |
267 | }, |
268 | [NBPF4B8] = { |
269 | .num_channels = 4, |
270 | .buffer_size = 8, |
271 | }, |
272 | [NBPF4B16] = { |
273 | .num_channels = 4, |
274 | .buffer_size = 16, |
275 | }, |
276 | [NBPF8B4] = { |
277 | .num_channels = 8, |
278 | .buffer_size = 4, |
279 | }, |
280 | [NBPF8B8] = { |
281 | .num_channels = 8, |
282 | .buffer_size = 8, |
283 | }, |
284 | [NBPF8B16] = { |
285 | .num_channels = 8, |
286 | .buffer_size = 16, |
287 | }, |
288 | }; |
289 | |
290 | #define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan) |
291 | |
292 | /* |
293 | * dmaengine drivers seem to have a lot in common and instead of sharing more |
294 | * code, they reimplement those common algorithms independently. In this driver |
295 | * we try to separate the hardware-specific part from the (largely) generic |
296 | * part. This improves code readability and makes it possible in the future to |
297 | * reuse the generic code in form of a helper library. That generic code should |
298 | * be suitable for various DMA controllers, using transfer descriptors in RAM |
299 | * and pushing one SG list at a time to the DMA controller. |
300 | */ |
301 | |
302 | /* Hardware-specific part */ |
303 | |
304 | static inline u32 nbpf_chan_read(struct nbpf_channel *chan, |
305 | unsigned int offset) |
306 | { |
307 | u32 data = ioread32(chan->base + offset); |
308 | dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n" , |
309 | __func__, chan->base, offset, data); |
310 | return data; |
311 | } |
312 | |
313 | static inline void nbpf_chan_write(struct nbpf_channel *chan, |
314 | unsigned int offset, u32 data) |
315 | { |
316 | iowrite32(data, chan->base + offset); |
317 | dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n" , |
318 | __func__, chan->base, offset, data); |
319 | } |
320 | |
321 | static inline u32 nbpf_read(struct nbpf_device *nbpf, |
322 | unsigned int offset) |
323 | { |
324 | u32 data = ioread32(nbpf->base + offset); |
325 | dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n" , |
326 | __func__, nbpf->base, offset, data); |
327 | return data; |
328 | } |
329 | |
330 | static inline void nbpf_write(struct nbpf_device *nbpf, |
331 | unsigned int offset, u32 data) |
332 | { |
333 | iowrite32(data, nbpf->base + offset); |
334 | dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n" , |
335 | __func__, nbpf->base, offset, data); |
336 | } |
337 | |
338 | static void nbpf_chan_halt(struct nbpf_channel *chan) |
339 | { |
340 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); |
341 | } |
342 | |
343 | static bool nbpf_status_get(struct nbpf_channel *chan) |
344 | { |
	u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);
346 | |
347 | return status & BIT(chan - chan->nbpf->chan); |
348 | } |
349 | |
350 | static void nbpf_status_ack(struct nbpf_channel *chan) |
351 | { |
352 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND); |
353 | } |
354 | |
355 | static u32 nbpf_error_get(struct nbpf_device *nbpf) |
356 | { |
357 | return nbpf_read(nbpf, NBPF_DSTAT_ER); |
358 | } |
359 | |
360 | static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error) |
361 | { |
362 | return nbpf->chan + __ffs(error); |
363 | } |
364 | |
365 | static void nbpf_error_clear(struct nbpf_channel *chan) |
366 | { |
367 | u32 status; |
368 | int i; |
369 | |
370 | /* Stop the channel, make sure DMA has been aborted */ |
371 | nbpf_chan_halt(chan); |
372 | |
373 | for (i = 1000; i; i--) { |
374 | status = nbpf_chan_read(chan, NBPF_CHAN_STAT); |
375 | if (!(status & NBPF_CHAN_STAT_TACT)) |
376 | break; |
377 | cpu_relax(); |
378 | } |
379 | |
380 | if (!i) |
381 | dev_err(chan->dma_chan.device->dev, |
382 | "%s(): abort timeout, channel status 0x%x\n" , __func__, status); |
383 | |
384 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST); |
385 | } |
386 | |
387 | static int nbpf_start(struct nbpf_desc *desc) |
388 | { |
389 | struct nbpf_channel *chan = desc->chan; |
390 | struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node); |
391 | |
	nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
393 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS); |
394 | chan->paused = false; |
395 | |
396 | /* Software trigger MEMCPY - only MEMCPY uses the block mode */ |
397 | if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM) |
398 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG); |
399 | |
400 | dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n" , __func__, |
401 | nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA)); |
402 | |
403 | return 0; |
404 | } |
405 | |
406 | static void nbpf_chan_prepare(struct nbpf_channel *chan) |
407 | { |
408 | chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) | |
409 | (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) | |
410 | (chan->flags & NBPF_SLAVE_RQ_LEVEL ? |
411 | NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) | |
412 | chan->terminal; |
413 | } |
414 | |
415 | static void nbpf_chan_prepare_default(struct nbpf_channel *chan) |
416 | { |
417 | /* Don't output DMAACK */ |
418 | chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400; |
419 | chan->terminal = 0; |
420 | chan->flags = 0; |
421 | } |
422 | |
423 | static void nbpf_chan_configure(struct nbpf_channel *chan) |
424 | { |
425 | /* |
426 | * We assume, that only the link mode and DMA request line configuration |
427 | * have to be set in the configuration register manually. Dynamic |
428 | * per-transfer configuration will be loaded from transfer descriptors. |
429 | */ |
430 | nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg); |
431 | } |
432 | |
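/*
 * Pick the data-size encoding used in the SDS / DDS fields of the channel
 * configuration register (0: 8 bits, ..., 7: 1024 bits): the largest
 * power-of-two unit that both divides the transfer size and fits within the
 * buffer. E.g. with a 16-byte buffer (max_burst = 128 bytes) a 4-byte
 * transfer yields 2, i.e. 32-bit units.
 */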
433 | static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size, |
434 | enum dma_transfer_direction direction) |
435 | { |
436 | int max_burst = nbpf->config->buffer_size * 8; |
437 | |
438 | if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) { |
439 | switch (direction) { |
440 | case DMA_MEM_TO_MEM: |
441 | max_burst = min_not_zero(nbpf->max_burst_mem_read, |
442 | nbpf->max_burst_mem_write); |
443 | break; |
444 | case DMA_MEM_TO_DEV: |
445 | if (nbpf->max_burst_mem_read) |
446 | max_burst = nbpf->max_burst_mem_read; |
447 | break; |
448 | case DMA_DEV_TO_MEM: |
449 | if (nbpf->max_burst_mem_write) |
450 | max_burst = nbpf->max_burst_mem_write; |
451 | break; |
452 | case DMA_DEV_TO_DEV: |
453 | default: |
454 | break; |
455 | } |
456 | } |
457 | |
458 | /* Maximum supported bursts depend on the buffer size */ |
459 | return min_t(int, __ffs(size), ilog2(max_burst)); |
460 | } |
461 | |
462 | static size_t nbpf_xfer_size(struct nbpf_device *nbpf, |
463 | enum dma_slave_buswidth width, u32 burst) |
464 | { |
465 | size_t size; |
466 | |
467 | if (!burst) |
468 | burst = 1; |
469 | |
470 | switch (width) { |
471 | case DMA_SLAVE_BUSWIDTH_8_BYTES: |
472 | size = 8 * burst; |
473 | break; |
474 | |
475 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
476 | size = 4 * burst; |
477 | break; |
478 | |
479 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
480 | size = 2 * burst; |
481 | break; |
482 | |
483 | default: |
484 | pr_warn("%s(): invalid bus width %u\n" , __func__, width); |
485 | fallthrough; |
486 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
487 | size = burst; |
488 | } |
489 | |
	return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
491 | } |
492 | |
493 | /* |
494 | * We need a way to recognise slaves, whose data is sent "raw" over the bus, |
495 | * i.e. it isn't known in advance how many bytes will be received. Therefore |
496 | * the slave driver has to provide a "large enough" buffer and either read the |
497 | * buffer, when it is full, or detect, that some data has arrived, then wait for |
498 | * a timeout, if no more data arrives - receive what's already there. We want to |
499 | * handle such slaves in a special way to allow an optimised mode for other |
500 | * users, for whom the amount of data is known in advance. So far there's no way |
501 | * to recognise such slaves. We use a data-width check to distinguish between |
502 | * the SD host and the PL011 UART. |
503 | */ |
504 | |
505 | static int nbpf_prep_one(struct nbpf_link_desc *ldesc, |
506 | enum dma_transfer_direction direction, |
507 | dma_addr_t src, dma_addr_t dst, size_t size, bool last) |
508 | { |
509 | struct nbpf_link_reg *hwdesc = ldesc->hwdesc; |
510 | struct nbpf_desc *desc = ldesc->desc; |
511 | struct nbpf_channel *chan = desc->chan; |
512 | struct device *dev = chan->dma_chan.device->dev; |
513 | size_t mem_xfer, slave_xfer; |
514 | bool can_burst; |
515 | |
516 | hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV | |
517 | (last ? NBPF_HEADER_LE : 0); |
518 | |
519 | hwdesc->src_addr = src; |
520 | hwdesc->dst_addr = dst; |
521 | hwdesc->transaction_size = size; |
522 | |
523 | /* |
524 | * set config: SAD, DAD, DDS, SDS, etc. |
525 | * Note on transfer sizes: the DMAC can perform unaligned DMA transfers, |
526 | * but it is important to have transaction size a multiple of both |
527 | * receiver and transmitter transfer sizes. It is also possible to use |
528 | * different RAM and device transfer sizes, and it does work well with |
529 | * some devices, e.g. with V08R07S01E SD host controllers, which can use |
530 | * 128 byte transfers. But this doesn't work with other devices, |
531 | * especially when the transaction size is unknown. This is the case, |
532 | * e.g. with serial drivers like amba-pl011.c. For reception it sets up |
533 | * the transaction size of 4K and if fewer bytes are received, it |
534 | * pauses DMA and reads out data received via DMA as well as those left |
535 | * in the Rx FIFO. For this to work with the RAM side using burst |
536 | * transfers we enable the SBE bit and terminate the transfer in our |
537 | * .device_pause handler. |
538 | */ |
	mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);
540 | |
541 | switch (direction) { |
542 | case DMA_DEV_TO_MEM: |
543 | can_burst = chan->slave_src_width >= 3; |
544 | slave_xfer = min(mem_xfer, can_burst ? |
545 | chan->slave_src_burst : chan->slave_src_width); |
546 | /* |
547 | * Is the slave narrower than 64 bits, i.e. isn't using the full |
548 | * bus width and cannot use bursts? |
549 | */ |
550 | if (mem_xfer > chan->slave_src_burst && !can_burst) |
551 | mem_xfer = chan->slave_src_burst; |
552 | /* Device-to-RAM DMA is unreliable without REQD set */ |
553 | hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) | |
554 | (NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD | |
555 | NBPF_CHAN_CFG_SBE; |
556 | break; |
557 | |
558 | case DMA_MEM_TO_DEV: |
559 | slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ? |
560 | chan->slave_dst_burst : chan->slave_dst_width); |
561 | hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) | |
562 | (NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD; |
563 | break; |
564 | |
565 | case DMA_MEM_TO_MEM: |
566 | hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM | |
567 | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) | |
568 | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)); |
569 | break; |
570 | |
571 | default: |
572 | return -EINVAL; |
573 | } |
574 | |
575 | hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) | |
576 | NBPF_CHAN_CFG_DMS; |
577 | |
	dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n",
		__func__, &ldesc->hwdesc_dma_addr, hwdesc->header,
		hwdesc->config, size, &src, &dst);

	dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc),
				   DMA_TO_DEVICE);
584 | |
585 | return 0; |
586 | } |
587 | |
588 | static size_t nbpf_bytes_left(struct nbpf_channel *chan) |
589 | { |
590 | return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE); |
591 | } |
592 | |
593 | static void nbpf_configure(struct nbpf_device *nbpf) |
594 | { |
595 | nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT); |
596 | } |
597 | |
598 | /* Generic part */ |
599 | |
600 | /* DMA ENGINE functions */ |
601 | static void nbpf_issue_pending(struct dma_chan *dchan) |
602 | { |
603 | struct nbpf_channel *chan = nbpf_to_chan(dchan); |
604 | unsigned long flags; |
605 | |
606 | dev_dbg(dchan->device->dev, "Entry %s()\n" , __func__); |
607 | |
608 | spin_lock_irqsave(&chan->lock, flags); |
	if (list_empty(&chan->queued))
		goto unlock;

	list_splice_tail_init(&chan->queued, &chan->active);
613 | |
614 | if (!chan->running) { |
615 | struct nbpf_desc *desc = list_first_entry(&chan->active, |
616 | struct nbpf_desc, node); |
617 | if (!nbpf_start(desc)) |
618 | chan->running = desc; |
619 | } |
620 | |
621 | unlock: |
	spin_unlock_irqrestore(&chan->lock, flags);
623 | } |
624 | |
625 | static enum dma_status nbpf_tx_status(struct dma_chan *dchan, |
626 | dma_cookie_t cookie, struct dma_tx_state *state) |
627 | { |
628 | struct nbpf_channel *chan = nbpf_to_chan(dchan); |
	enum dma_status status = dma_cookie_status(dchan, cookie, state);
630 | |
631 | if (state) { |
632 | dma_cookie_t running; |
633 | unsigned long flags; |
634 | |
635 | spin_lock_irqsave(&chan->lock, flags); |
636 | running = chan->running ? chan->running->async_tx.cookie : -EINVAL; |
637 | |
638 | if (cookie == running) { |
639 | state->residue = nbpf_bytes_left(chan); |
640 | dev_dbg(dchan->device->dev, "%s(): residue %u\n" , __func__, |
641 | state->residue); |
642 | } else if (status == DMA_IN_PROGRESS) { |
643 | struct nbpf_desc *desc; |
644 | bool found = false; |
645 | |
646 | list_for_each_entry(desc, &chan->active, node) |
647 | if (desc->async_tx.cookie == cookie) { |
648 | found = true; |
649 | break; |
650 | } |
651 | |
652 | if (!found) |
653 | list_for_each_entry(desc, &chan->queued, node) |
654 | if (desc->async_tx.cookie == cookie) { |
655 | found = true; |
656 | break; |
657 | |
658 | } |
659 | |
660 | state->residue = found ? desc->length : 0; |
661 | } |
662 | |
		spin_unlock_irqrestore(&chan->lock, flags);
664 | } |
665 | |
666 | if (chan->paused) |
667 | status = DMA_PAUSED; |
668 | |
669 | return status; |
670 | } |
671 | |
672 | static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx) |
673 | { |
674 | struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx); |
675 | struct nbpf_channel *chan = desc->chan; |
676 | unsigned long flags; |
677 | dma_cookie_t cookie; |
678 | |
679 | spin_lock_irqsave(&chan->lock, flags); |
680 | cookie = dma_cookie_assign(tx); |
	list_add_tail(&desc->node, &chan->queued);
	spin_unlock_irqrestore(&chan->lock, flags);
683 | |
684 | dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n" , __func__, cookie); |
685 | |
686 | return cookie; |
687 | } |
688 | |
689 | static int nbpf_desc_page_alloc(struct nbpf_channel *chan) |
690 | { |
691 | struct dma_chan *dchan = &chan->dma_chan; |
692 | struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
693 | struct nbpf_link_desc *ldesc; |
694 | struct nbpf_link_reg *hwdesc; |
695 | struct nbpf_desc *desc; |
696 | LIST_HEAD(head); |
697 | LIST_HEAD(lhead); |
698 | int i; |
699 | struct device *dev = dchan->device->dev; |
700 | |
701 | if (!dpage) |
702 | return -ENOMEM; |
703 | |
704 | dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n" , |
705 | __func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage)); |
706 | |
707 | for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc; |
708 | i < ARRAY_SIZE(dpage->ldesc); |
709 | i++, ldesc++, hwdesc++) { |
710 | ldesc->hwdesc = hwdesc; |
		list_add_tail(&ldesc->node, &lhead);
712 | ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev, |
713 | hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE); |
714 | |
715 | dev_dbg(dev, "%s(): mapped 0x%p to %pad\n" , __func__, |
716 | hwdesc, &ldesc->hwdesc_dma_addr); |
717 | } |
718 | |
719 | for (i = 0, desc = dpage->desc; |
720 | i < ARRAY_SIZE(dpage->desc); |
721 | i++, desc++) { |
		dma_async_tx_descriptor_init(&desc->async_tx, dchan);
723 | desc->async_tx.tx_submit = nbpf_tx_submit; |
724 | desc->chan = chan; |
		INIT_LIST_HEAD(&desc->sg);
		list_add_tail(&desc->node, &head);
727 | } |
728 | |
729 | /* |
730 | * This function cannot be called from interrupt context, so, no need to |
731 | * save flags |
732 | */ |
	spin_lock_irq(&chan->lock);
	list_splice_tail(&lhead, &chan->free_links);
	list_splice_tail(&head, &chan->free);
	list_add(&dpage->node, &chan->desc_page);
	spin_unlock_irq(&chan->lock);
738 | |
739 | return ARRAY_SIZE(dpage->desc); |
740 | } |
741 | |
742 | static void nbpf_desc_put(struct nbpf_desc *desc) |
743 | { |
744 | struct nbpf_channel *chan = desc->chan; |
745 | struct nbpf_link_desc *ldesc, *tmp; |
746 | unsigned long flags; |
747 | |
748 | spin_lock_irqsave(&chan->lock, flags); |
749 | list_for_each_entry_safe(ldesc, tmp, &desc->sg, node) |
		list_move(&ldesc->node, &chan->free_links);

	list_add(&desc->node, &chan->free);
	spin_unlock_irqrestore(&chan->lock, flags);
754 | } |
755 | |
756 | static void nbpf_scan_acked(struct nbpf_channel *chan) |
757 | { |
758 | struct nbpf_desc *desc, *tmp; |
759 | unsigned long flags; |
760 | LIST_HEAD(head); |
761 | |
762 | spin_lock_irqsave(&chan->lock, flags); |
763 | list_for_each_entry_safe(desc, tmp, &chan->done, node) |
		if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {
			list_move(&desc->node, &head);
			desc->user_wait = false;
		}
	spin_unlock_irqrestore(&chan->lock, flags);
769 | |
770 | list_for_each_entry_safe(desc, tmp, &head, node) { |
		list_del(&desc->node);
772 | nbpf_desc_put(desc); |
773 | } |
774 | } |
775 | |
776 | /* |
777 | * We have to allocate descriptors with the channel lock dropped. This means, |
778 | * before we re-acquire the lock buffers can be taken already, so we have to |
779 | * re-check after re-acquiring the lock and possibly retry, if buffers are gone |
780 | * again. |
781 | */ |
782 | static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len) |
783 | { |
784 | struct nbpf_desc *desc = NULL; |
785 | struct nbpf_link_desc *ldesc, *prev = NULL; |
786 | |
787 | nbpf_scan_acked(chan); |
788 | |
	spin_lock_irq(&chan->lock);
790 | |
791 | do { |
792 | int i = 0, ret; |
793 | |
		if (list_empty(&chan->free)) {
			/* No more free descriptors */
			spin_unlock_irq(&chan->lock);
797 | ret = nbpf_desc_page_alloc(chan); |
798 | if (ret < 0) |
799 | return NULL; |
			spin_lock_irq(&chan->lock);
801 | continue; |
802 | } |
803 | desc = list_first_entry(&chan->free, struct nbpf_desc, node); |
		list_del(&desc->node);
805 | |
806 | do { |
			if (list_empty(&chan->free_links)) {
				/* No more free link descriptors */
				spin_unlock_irq(&chan->lock);
810 | ret = nbpf_desc_page_alloc(chan); |
811 | if (ret < 0) { |
812 | nbpf_desc_put(desc); |
813 | return NULL; |
814 | } |
				spin_lock_irq(&chan->lock);
816 | continue; |
817 | } |
818 | |
819 | ldesc = list_first_entry(&chan->free_links, |
820 | struct nbpf_link_desc, node); |
821 | ldesc->desc = desc; |
822 | if (prev) |
823 | prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr; |
824 | |
825 | prev = ldesc; |
			list_move_tail(&ldesc->node, &desc->sg);
827 | |
828 | i++; |
829 | } while (i < len); |
830 | } while (!desc); |
831 | |
832 | prev->hwdesc->next = 0; |
833 | |
	spin_unlock_irq(&chan->lock);
835 | |
836 | return desc; |
837 | } |
838 | |
839 | static void nbpf_chan_idle(struct nbpf_channel *chan) |
840 | { |
841 | struct nbpf_desc *desc, *tmp; |
842 | unsigned long flags; |
843 | LIST_HEAD(head); |
844 | |
845 | spin_lock_irqsave(&chan->lock, flags); |
846 | |
	list_splice_init(&chan->done, &head);
	list_splice_init(&chan->active, &head);
	list_splice_init(&chan->queued, &head);
850 | |
851 | chan->running = NULL; |
852 | |
	spin_unlock_irqrestore(&chan->lock, flags);
854 | |
855 | list_for_each_entry_safe(desc, tmp, &head, node) { |
856 | dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n" , |
857 | __func__, desc, desc->async_tx.cookie); |
		list_del(&desc->node);
859 | nbpf_desc_put(desc); |
860 | } |
861 | } |
862 | |
863 | static int nbpf_pause(struct dma_chan *dchan) |
864 | { |
865 | struct nbpf_channel *chan = nbpf_to_chan(dchan); |
866 | |
867 | dev_dbg(dchan->device->dev, "Entry %s\n" , __func__); |
868 | |
869 | chan->paused = true; |
870 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); |
871 | /* See comment in nbpf_prep_one() */ |
872 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); |
873 | |
874 | return 0; |
875 | } |
876 | |
877 | static int nbpf_terminate_all(struct dma_chan *dchan) |
878 | { |
879 | struct nbpf_channel *chan = nbpf_to_chan(dchan); |
880 | |
881 | dev_dbg(dchan->device->dev, "Entry %s\n" , __func__); |
882 | dev_dbg(dchan->device->dev, "Terminating\n" ); |
883 | |
884 | nbpf_chan_halt(chan); |
885 | nbpf_chan_idle(chan); |
886 | |
887 | return 0; |
888 | } |
889 | |
890 | static int nbpf_config(struct dma_chan *dchan, |
891 | struct dma_slave_config *config) |
892 | { |
893 | struct nbpf_channel *chan = nbpf_to_chan(dchan); |
894 | |
895 | dev_dbg(dchan->device->dev, "Entry %s\n" , __func__); |
896 | |
897 | /* |
898 | * We could check config->slave_id to match chan->terminal here, |
899 | * but with DT they would be coming from the same source, so |
	 * such a check would be superfluous
901 | */ |
902 | |
903 | chan->slave_dst_addr = config->dst_addr; |
	chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width, 1);
	chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width,
					       config->dst_maxburst);
	chan->slave_src_addr = config->src_addr;
	chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width, 1);
	chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width,
					       config->src_maxburst);
915 | |
916 | return 0; |
917 | } |
918 | |
919 | static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan, |
920 | struct scatterlist *src_sg, struct scatterlist *dst_sg, |
921 | size_t len, enum dma_transfer_direction direction, |
922 | unsigned long flags) |
923 | { |
924 | struct nbpf_link_desc *ldesc; |
925 | struct scatterlist *mem_sg; |
926 | struct nbpf_desc *desc; |
927 | bool inc_src, inc_dst; |
928 | size_t data_len = 0; |
929 | int i = 0; |
930 | |
931 | switch (direction) { |
932 | case DMA_DEV_TO_MEM: |
933 | mem_sg = dst_sg; |
934 | inc_src = false; |
935 | inc_dst = true; |
936 | break; |
937 | |
938 | case DMA_MEM_TO_DEV: |
939 | mem_sg = src_sg; |
940 | inc_src = true; |
941 | inc_dst = false; |
942 | break; |
943 | |
944 | default: |
945 | case DMA_MEM_TO_MEM: |
946 | mem_sg = src_sg; |
947 | inc_src = true; |
948 | inc_dst = true; |
949 | } |
950 | |
951 | desc = nbpf_desc_get(chan, len); |
952 | if (!desc) |
953 | return NULL; |
954 | |
955 | desc->async_tx.flags = flags; |
956 | desc->async_tx.cookie = -EBUSY; |
957 | desc->user_wait = false; |
958 | |
959 | /* |
960 | * This is a private descriptor list, and we own the descriptor. No need |
961 | * to lock. |
962 | */ |
963 | list_for_each_entry(ldesc, &desc->sg, node) { |
		int ret = nbpf_prep_one(ldesc, direction,
					sg_dma_address(src_sg),
					sg_dma_address(dst_sg),
					sg_dma_len(mem_sg),
					i == len - 1);
969 | if (ret < 0) { |
970 | nbpf_desc_put(desc); |
971 | return NULL; |
972 | } |
973 | data_len += sg_dma_len(mem_sg); |
974 | if (inc_src) |
975 | src_sg = sg_next(src_sg); |
976 | if (inc_dst) |
977 | dst_sg = sg_next(dst_sg); |
978 | mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg; |
979 | i++; |
980 | } |
981 | |
982 | desc->length = data_len; |
983 | |
984 | /* The user has to return the descriptor to us ASAP via .tx_submit() */ |
985 | return &desc->async_tx; |
986 | } |
987 | |
988 | static struct dma_async_tx_descriptor *nbpf_prep_memcpy( |
989 | struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src, |
990 | size_t len, unsigned long flags) |
991 | { |
992 | struct nbpf_channel *chan = nbpf_to_chan(dchan); |
993 | struct scatterlist dst_sg; |
994 | struct scatterlist src_sg; |
995 | |
996 | sg_init_table(&dst_sg, 1); |
997 | sg_init_table(&src_sg, 1); |
998 | |
999 | sg_dma_address(&dst_sg) = dst; |
1000 | sg_dma_address(&src_sg) = src; |
1001 | |
1002 | sg_dma_len(&dst_sg) = len; |
1003 | sg_dma_len(&src_sg) = len; |
1004 | |
1005 | dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n" , |
1006 | __func__, len, &src, &dst); |
1007 | |
	return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1,
			    DMA_MEM_TO_MEM, flags);
1010 | } |
1011 | |
1012 | static struct dma_async_tx_descriptor *nbpf_prep_slave_sg( |
1013 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, |
1014 | enum dma_transfer_direction direction, unsigned long flags, void *context) |
1015 | { |
1016 | struct nbpf_channel *chan = nbpf_to_chan(dchan); |
1017 | struct scatterlist slave_sg; |
1018 | |
1019 | dev_dbg(dchan->device->dev, "Entry %s()\n" , __func__); |
1020 | |
1021 | sg_init_table(&slave_sg, 1); |
1022 | |
1023 | switch (direction) { |
1024 | case DMA_MEM_TO_DEV: |
1025 | sg_dma_address(&slave_sg) = chan->slave_dst_addr; |
		return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
				    direction, flags);
1028 | |
1029 | case DMA_DEV_TO_MEM: |
1030 | sg_dma_address(&slave_sg) = chan->slave_src_addr; |
		return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
				    direction, flags);
1033 | |
1034 | default: |
1035 | return NULL; |
1036 | } |
1037 | } |
1038 | |
1039 | static int nbpf_alloc_chan_resources(struct dma_chan *dchan) |
1040 | { |
1041 | struct nbpf_channel *chan = nbpf_to_chan(dchan); |
1042 | int ret; |
1043 | |
	INIT_LIST_HEAD(&chan->free);
	INIT_LIST_HEAD(&chan->free_links);
	INIT_LIST_HEAD(&chan->queued);
	INIT_LIST_HEAD(&chan->active);
	INIT_LIST_HEAD(&chan->done);
1049 | |
1050 | ret = nbpf_desc_page_alloc(chan); |
1051 | if (ret < 0) |
1052 | return ret; |
1053 | |
1054 | dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n" , __func__, |
1055 | chan->terminal); |
1056 | |
1057 | nbpf_chan_configure(chan); |
1058 | |
1059 | return ret; |
1060 | } |
1061 | |
1062 | static void nbpf_free_chan_resources(struct dma_chan *dchan) |
1063 | { |
1064 | struct nbpf_channel *chan = nbpf_to_chan(dchan); |
1065 | struct nbpf_desc_page *dpage, *tmp; |
1066 | |
1067 | dev_dbg(dchan->device->dev, "Entry %s()\n" , __func__); |
1068 | |
1069 | nbpf_chan_halt(chan); |
1070 | nbpf_chan_idle(chan); |
1071 | /* Clean up for if a channel is re-used for MEMCPY after slave DMA */ |
1072 | nbpf_chan_prepare_default(chan); |
1073 | |
1074 | list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) { |
1075 | struct nbpf_link_desc *ldesc; |
1076 | int i; |
		list_del(&dpage->node);
1078 | for (i = 0, ldesc = dpage->ldesc; |
1079 | i < ARRAY_SIZE(dpage->ldesc); |
1080 | i++, ldesc++) |
1081 | dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr, |
1082 | sizeof(*ldesc->hwdesc), DMA_TO_DEVICE); |
1083 | free_page((unsigned long)dpage); |
1084 | } |
1085 | } |
1086 | |
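/*
 * Translate a two-cell DT DMA specifier into a channel. An illustrative
 * (assumed, platform-specific) client entry could look like:
 *
 *	dmas = <&dmac 6 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)>;
 *
 * where the first cell selects the DMA terminal and the second carries the
 * request-line flags from <dt-bindings/dma/nbpfaxi.h>.
 */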
1087 | static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, |
1088 | struct of_dma *ofdma) |
1089 | { |
1090 | struct nbpf_device *nbpf = ofdma->of_dma_data; |
1091 | struct dma_chan *dchan; |
1092 | struct nbpf_channel *chan; |
1093 | |
1094 | if (dma_spec->args_count != 2) |
1095 | return NULL; |
1096 | |
	dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
1098 | if (!dchan) |
1099 | return NULL; |
1100 | |
1101 | dev_dbg(dchan->device->dev, "Entry %s(%pOFn)\n" , __func__, |
1102 | dma_spec->np); |
1103 | |
1104 | chan = nbpf_to_chan(dchan); |
1105 | |
1106 | chan->terminal = dma_spec->args[0]; |
1107 | chan->flags = dma_spec->args[1]; |
1108 | |
1109 | nbpf_chan_prepare(chan); |
1110 | nbpf_chan_configure(chan); |
1111 | |
1112 | return dchan; |
1113 | } |
1114 | |
1115 | static void nbpf_chan_tasklet(struct tasklet_struct *t) |
1116 | { |
1117 | struct nbpf_channel *chan = from_tasklet(chan, t, tasklet); |
1118 | struct nbpf_desc *desc, *tmp; |
1119 | struct dmaengine_desc_callback cb; |
1120 | |
	while (!list_empty(&chan->done)) {
		bool found = false, must_put, recycling = false;

		spin_lock_irq(&chan->lock);
1125 | |
1126 | list_for_each_entry_safe(desc, tmp, &chan->done, node) { |
1127 | if (!desc->user_wait) { |
1128 | /* Newly completed descriptor, have to process */ |
1129 | found = true; |
1130 | break; |
			} else if (async_tx_test_ack(&desc->async_tx)) {
1132 | /* |
1133 | * This descriptor was waiting for a user ACK, |
1134 | * it can be recycled now. |
1135 | */ |
				list_del(&desc->node);
				spin_unlock_irq(&chan->lock);
1138 | nbpf_desc_put(desc); |
1139 | recycling = true; |
1140 | break; |
1141 | } |
1142 | } |
1143 | |
1144 | if (recycling) |
1145 | continue; |
1146 | |
1147 | if (!found) { |
1148 | /* This can happen if TERMINATE_ALL has been called */ |
			spin_unlock_irq(&chan->lock);
1150 | break; |
1151 | } |
1152 | |
		dma_cookie_complete(&desc->async_tx);
1154 | |
1155 | /* |
1156 | * With released lock we cannot dereference desc, maybe it's |
1157 | * still on the "done" list |
1158 | */ |
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
1161 | must_put = true; |
1162 | } else { |
1163 | desc->user_wait = true; |
1164 | must_put = false; |
1165 | } |
1166 | |
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
1168 | |
1169 | /* ack and callback completed descriptor */ |
		spin_unlock_irq(&chan->lock);

		dmaengine_desc_callback_invoke(&cb, NULL);
1173 | |
1174 | if (must_put) |
1175 | nbpf_desc_put(desc); |
1176 | } |
1177 | } |
1178 | |
1179 | static irqreturn_t nbpf_chan_irq(int irq, void *dev) |
1180 | { |
1181 | struct nbpf_channel *chan = dev; |
1182 | bool done = nbpf_status_get(chan); |
1183 | struct nbpf_desc *desc; |
1184 | irqreturn_t ret; |
1185 | bool bh = false; |
1186 | |
1187 | if (!done) |
1188 | return IRQ_NONE; |
1189 | |
1190 | nbpf_status_ack(chan); |
1191 | |
1192 | dev_dbg(&chan->dma_chan.dev->device, "%s()\n" , __func__); |
1193 | |
	spin_lock(&chan->lock);
1195 | desc = chan->running; |
1196 | if (WARN_ON(!desc)) { |
1197 | ret = IRQ_NONE; |
1198 | goto unlock; |
1199 | } else { |
1200 | ret = IRQ_HANDLED; |
1201 | bh = true; |
1202 | } |
1203 | |
	list_move_tail(&desc->node, &chan->done);
1205 | chan->running = NULL; |
1206 | |
	if (!list_empty(&chan->active)) {
1208 | desc = list_first_entry(&chan->active, |
1209 | struct nbpf_desc, node); |
1210 | if (!nbpf_start(desc)) |
1211 | chan->running = desc; |
1212 | } |
1213 | |
1214 | unlock: |
	spin_unlock(&chan->lock);
1216 | |
1217 | if (bh) |
		tasklet_schedule(&chan->tasklet);
1219 | |
1220 | return ret; |
1221 | } |
1222 | |
1223 | static irqreturn_t nbpf_err_irq(int irq, void *dev) |
1224 | { |
1225 | struct nbpf_device *nbpf = dev; |
1226 | u32 error = nbpf_error_get(nbpf); |
1227 | |
1228 | dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n" , irq); |
1229 | |
1230 | if (!error) |
1231 | return IRQ_NONE; |
1232 | |
1233 | do { |
1234 | struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error); |
1235 | /* On error: abort all queued transfers, no callback */ |
1236 | nbpf_error_clear(chan); |
1237 | nbpf_chan_idle(chan); |
1238 | error = nbpf_error_get(nbpf); |
1239 | } while (error); |
1240 | |
1241 | return IRQ_HANDLED; |
1242 | } |
1243 | |
1244 | static int nbpf_chan_probe(struct nbpf_device *nbpf, int n) |
1245 | { |
1246 | struct dma_device *dma_dev = &nbpf->dma_dev; |
1247 | struct nbpf_channel *chan = nbpf->chan + n; |
1248 | int ret; |
1249 | |
1250 | chan->nbpf = nbpf; |
1251 | chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n; |
	INIT_LIST_HEAD(&chan->desc_page);
	spin_lock_init(&chan->lock);
	chan->dma_chan.device = dma_dev;
	dma_cookie_init(&chan->dma_chan);
	nbpf_chan_prepare_default(chan);

	dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base);

	snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);

	tasklet_setup(&chan->tasklet, nbpf_chan_tasklet);
	ret = devm_request_irq(dma_dev->dev, chan->irq,
			       nbpf_chan_irq, IRQF_SHARED,
			       chan->name, chan);
1266 | if (ret < 0) |
1267 | return ret; |
1268 | |
1269 | /* Add the channel to DMA device channel list */ |
	list_add_tail(&chan->dma_chan.device_node,
		      &dma_dev->channels);
1272 | |
1273 | return 0; |
1274 | } |
1275 | |
1276 | static const struct of_device_id nbpf_match[] = { |
1277 | {.compatible = "renesas,nbpfaxi64dmac1b4" , .data = &nbpf_cfg[NBPF1B4]}, |
1278 | {.compatible = "renesas,nbpfaxi64dmac1b8" , .data = &nbpf_cfg[NBPF1B8]}, |
1279 | {.compatible = "renesas,nbpfaxi64dmac1b16" , .data = &nbpf_cfg[NBPF1B16]}, |
1280 | {.compatible = "renesas,nbpfaxi64dmac4b4" , .data = &nbpf_cfg[NBPF4B4]}, |
1281 | {.compatible = "renesas,nbpfaxi64dmac4b8" , .data = &nbpf_cfg[NBPF4B8]}, |
1282 | {.compatible = "renesas,nbpfaxi64dmac4b16" , .data = &nbpf_cfg[NBPF4B16]}, |
1283 | {.compatible = "renesas,nbpfaxi64dmac8b4" , .data = &nbpf_cfg[NBPF8B4]}, |
1284 | {.compatible = "renesas,nbpfaxi64dmac8b8" , .data = &nbpf_cfg[NBPF8B8]}, |
1285 | {.compatible = "renesas,nbpfaxi64dmac8b16" , .data = &nbpf_cfg[NBPF8B16]}, |
1286 | {} |
1287 | }; |
1288 | MODULE_DEVICE_TABLE(of, nbpf_match); |
1289 | |
1290 | static int nbpf_probe(struct platform_device *pdev) |
1291 | { |
1292 | struct device *dev = &pdev->dev; |
1293 | struct device_node *np = dev->of_node; |
1294 | struct nbpf_device *nbpf; |
1295 | struct dma_device *dma_dev; |
1296 | const struct nbpf_config *cfg; |
1297 | int num_channels; |
1298 | int ret, irq, eirq, i; |
1299 | int irqbuf[9] /* maximum 8 channels + error IRQ */; |
1300 | unsigned int irqs = 0; |
1301 | |
1302 | BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE); |
1303 | |
1304 | /* DT only */ |
1305 | if (!np) |
1306 | return -ENODEV; |
1307 | |
1308 | cfg = of_device_get_match_data(dev); |
1309 | num_channels = cfg->num_channels; |
1310 | |
1311 | nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels), |
1312 | GFP_KERNEL); |
1313 | if (!nbpf) |
1314 | return -ENOMEM; |
1315 | |
1316 | dma_dev = &nbpf->dma_dev; |
1317 | dma_dev->dev = dev; |
1318 | |
	nbpf->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(nbpf->base))
		return PTR_ERR(nbpf->base);

	nbpf->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(nbpf->clk))
		return PTR_ERR(nbpf->clk);

	of_property_read_u32(np, "max-burst-mem-read",
			     &nbpf->max_burst_mem_read);
	of_property_read_u32(np, "max-burst-mem-write",
			     &nbpf->max_burst_mem_write);
1331 | |
1332 | nbpf->config = cfg; |
1333 | |
	for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
		irq = platform_get_irq_optional(pdev, i);
		if (irq < 0 && irq != -ENXIO)
			return irq;
		if (irq == -ENXIO)
			break;
		if (irq > 0)
			irqbuf[irqs++] = irq;
	}
1341 | |
1342 | /* |
1343 | * 3 IRQ resource schemes are supported: |
1344 | * 1. 1 shared IRQ for error and all channels |
1345 | * 2. 2 IRQs: one for error and one shared for all channels |
1346 | * 3. 1 IRQ for error and an own IRQ for each channel |
1347 | */ |
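	/*
	 * E.g. an 8-channel DMAC wired per scheme 3 provides nine interrupts:
	 * one per channel plus the interrupt named "error", in any order.
	 */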
1348 | if (irqs != 1 && irqs != 2 && irqs != num_channels + 1) |
1349 | return -ENXIO; |
1350 | |
1351 | if (irqs == 1) { |
1352 | eirq = irqbuf[0]; |
1353 | |
		for (i = 0; i < num_channels; i++)
1355 | nbpf->chan[i].irq = irqbuf[0]; |
1356 | } else { |
1357 | eirq = platform_get_irq_byname(pdev, "error" ); |
1358 | if (eirq < 0) |
1359 | return eirq; |
1360 | |
1361 | if (irqs == num_channels + 1) { |
1362 | struct nbpf_channel *chan; |
1363 | |
1364 | for (i = 0, chan = nbpf->chan; i <= num_channels; |
1365 | i++, chan++) { |
1366 | /* Skip the error IRQ */ |
1367 | if (irqbuf[i] == eirq) |
1368 | i++; |
1369 | chan->irq = irqbuf[i]; |
1370 | } |
1371 | |
1372 | if (chan != nbpf->chan + num_channels) |
1373 | return -EINVAL; |
1374 | } else { |
1375 | /* 2 IRQs and more than one channel */ |
1376 | if (irqbuf[0] == eirq) |
1377 | irq = irqbuf[1]; |
1378 | else |
1379 | irq = irqbuf[0]; |
1380 | |
			for (i = 0; i < num_channels; i++)
1382 | nbpf->chan[i].irq = irq; |
1383 | } |
1384 | } |
1385 | |
	ret = devm_request_irq(dev, eirq, nbpf_err_irq,
			       IRQF_SHARED, "dma error", nbpf);
1388 | if (ret < 0) |
1389 | return ret; |
1390 | nbpf->eirq = eirq; |
1391 | |
	INIT_LIST_HEAD(&dma_dev->channels);
1393 | |
1394 | /* Create DMA Channel */ |
1395 | for (i = 0; i < num_channels; i++) { |
		ret = nbpf_chan_probe(nbpf, i);
1397 | if (ret < 0) |
1398 | return ret; |
1399 | } |
1400 | |
1401 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); |
1402 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); |
1403 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); |
1404 | |
1405 | /* Common and MEMCPY operations */ |
1406 | dma_dev->device_alloc_chan_resources |
1407 | = nbpf_alloc_chan_resources; |
1408 | dma_dev->device_free_chan_resources = nbpf_free_chan_resources; |
1409 | dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; |
1410 | dma_dev->device_tx_status = nbpf_tx_status; |
1411 | dma_dev->device_issue_pending = nbpf_issue_pending; |
1412 | |
1413 | /* |
1414 | * If we drop support for unaligned MEMCPY buffer addresses and / or |
1415 | * lengths by setting |
1416 | * dma_dev->copy_align = 4; |
1417 | * then we can set transfer length to 4 bytes in nbpf_prep_one() for |
1418 | * DMA_MEM_TO_MEM |
1419 | */ |
1420 | |
1421 | /* Compulsory for DMA_SLAVE fields */ |
1422 | dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg; |
1423 | dma_dev->device_config = nbpf_config; |
1424 | dma_dev->device_pause = nbpf_pause; |
1425 | dma_dev->device_terminate_all = nbpf_terminate_all; |
1426 | |
1427 | dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS; |
1428 | dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS; |
1429 | dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
1430 | |
	platform_set_drvdata(pdev, nbpf);

	ret = clk_prepare_enable(nbpf->clk);
1434 | if (ret < 0) |
1435 | return ret; |
1436 | |
1437 | nbpf_configure(nbpf); |
1438 | |
	ret = dma_async_device_register(dma_dev);
1440 | if (ret < 0) |
1441 | goto e_clk_off; |
1442 | |
	ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
1444 | if (ret < 0) |
1445 | goto e_dma_dev_unreg; |
1446 | |
1447 | return 0; |
1448 | |
1449 | e_dma_dev_unreg: |
	dma_async_device_unregister(dma_dev);
e_clk_off:
	clk_disable_unprepare(nbpf->clk);
1453 | |
1454 | return ret; |
1455 | } |
1456 | |
1457 | static void nbpf_remove(struct platform_device *pdev) |
1458 | { |
1459 | struct nbpf_device *nbpf = platform_get_drvdata(pdev); |
1460 | int i; |
1461 | |
	devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);
1463 | |
1464 | for (i = 0; i < nbpf->config->num_channels; i++) { |
1465 | struct nbpf_channel *chan = nbpf->chan + i; |
1466 | |
		devm_free_irq(&pdev->dev, chan->irq, chan);

		tasklet_kill(&chan->tasklet);
1470 | } |
1471 | |
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&nbpf->dma_dev);
	clk_disable_unprepare(nbpf->clk);
1475 | } |
1476 | |
1477 | static const struct platform_device_id nbpf_ids[] = { |
1478 | {"nbpfaxi64dmac1b4" , (kernel_ulong_t)&nbpf_cfg[NBPF1B4]}, |
1479 | {"nbpfaxi64dmac1b8" , (kernel_ulong_t)&nbpf_cfg[NBPF1B8]}, |
1480 | {"nbpfaxi64dmac1b16" , (kernel_ulong_t)&nbpf_cfg[NBPF1B16]}, |
1481 | {"nbpfaxi64dmac4b4" , (kernel_ulong_t)&nbpf_cfg[NBPF4B4]}, |
1482 | {"nbpfaxi64dmac4b8" , (kernel_ulong_t)&nbpf_cfg[NBPF4B8]}, |
1483 | {"nbpfaxi64dmac4b16" , (kernel_ulong_t)&nbpf_cfg[NBPF4B16]}, |
1484 | {"nbpfaxi64dmac8b4" , (kernel_ulong_t)&nbpf_cfg[NBPF8B4]}, |
1485 | {"nbpfaxi64dmac8b8" , (kernel_ulong_t)&nbpf_cfg[NBPF8B8]}, |
1486 | {"nbpfaxi64dmac8b16" , (kernel_ulong_t)&nbpf_cfg[NBPF8B16]}, |
1487 | {}, |
1488 | }; |
1489 | MODULE_DEVICE_TABLE(platform, nbpf_ids); |
1490 | |
1491 | #ifdef CONFIG_PM |
1492 | static int nbpf_runtime_suspend(struct device *dev) |
1493 | { |
1494 | struct nbpf_device *nbpf = dev_get_drvdata(dev); |
	clk_disable_unprepare(nbpf->clk);
1496 | return 0; |
1497 | } |
1498 | |
1499 | static int nbpf_runtime_resume(struct device *dev) |
1500 | { |
1501 | struct nbpf_device *nbpf = dev_get_drvdata(dev); |
	return clk_prepare_enable(nbpf->clk);
1503 | } |
1504 | #endif |
1505 | |
1506 | static const struct dev_pm_ops nbpf_pm_ops = { |
1507 | SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL) |
1508 | }; |
1509 | |
1510 | static struct platform_driver nbpf_driver = { |
1511 | .driver = { |
1512 | .name = "dma-nbpf" , |
1513 | .of_match_table = nbpf_match, |
1514 | .pm = &nbpf_pm_ops, |
1515 | }, |
1516 | .id_table = nbpf_ids, |
1517 | .probe = nbpf_probe, |
1518 | .remove_new = nbpf_remove, |
1519 | }; |
1520 | |
1521 | module_platform_driver(nbpf_driver); |
1522 | |
1523 | MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>" ); |
1524 | MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs" ); |
1525 | MODULE_LICENSE("GPL v2" ); |
1526 | |