// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected
 * to a dedicated bus which is directly connected to a data port on a
 * peripheral. Given that those are configuration options of the core that are
 * selected when it is instantiated, they can not be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus only connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */
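
/*
 * Illustrative sketch (not part of this driver): a peripheral driver consumes
 * a channel exposed here through the generic dmaengine client API. The channel
 * name "rx" and the device pointer are assumptions for the example.
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_addr, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */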

#define AXI_DMAC_REG_INTERFACE_DESC 0x10
#define AXI_DMAC_DMA_SRC_TYPE_MSK GENMASK(13, 12)
#define AXI_DMAC_DMA_SRC_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
#define AXI_DMAC_DMA_SRC_WIDTH_MSK GENMASK(11, 8)
#define AXI_DMAC_DMA_SRC_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
#define AXI_DMAC_DMA_DST_TYPE_MSK GENMASK(5, 4)
#define AXI_DMAC_DMA_DST_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define AXI_DMAC_DMA_DST_WIDTH_MSK GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
#define AXI_DMAC_REG_COHERENCY_DESC 0x14
#define AXI_DMAC_DST_COHERENT_MSK BIT(0)
#define AXI_DMAC_DST_COHERENT_GET(x) FIELD_GET(AXI_DMAC_DST_COHERENT_MSK, x)

#define AXI_DMAC_REG_IRQ_MASK 0x80
#define AXI_DMAC_REG_IRQ_PENDING 0x84
#define AXI_DMAC_REG_IRQ_SOURCE 0x88

#define AXI_DMAC_REG_CTRL 0x400
#define AXI_DMAC_REG_TRANSFER_ID 0x404
#define AXI_DMAC_REG_START_TRANSFER 0x408
#define AXI_DMAC_REG_FLAGS 0x40c
#define AXI_DMAC_REG_DEST_ADDRESS 0x410
#define AXI_DMAC_REG_SRC_ADDRESS 0x414
#define AXI_DMAC_REG_X_LENGTH 0x418
#define AXI_DMAC_REG_Y_LENGTH 0x41c
#define AXI_DMAC_REG_DEST_STRIDE 0x420
#define AXI_DMAC_REG_SRC_STRIDE 0x424
#define AXI_DMAC_REG_TRANSFER_DONE 0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS 0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450

#define AXI_DMAC_CTRL_ENABLE BIT(0)
#define AXI_DMAC_CTRL_PAUSE BIT(1)

#define AXI_DMAC_IRQ_SOT BIT(0)
#define AXI_DMAC_IRQ_EOT BIT(1)

#define AXI_DMAC_FLAG_CYCLIC BIT(0)
#define AXI_DMAC_FLAG_LAST BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)

/*
 * The maximum ID allocated by the hardware is 31, so a value of 32 can never
 * collide with a real transfer ID and is used to mark a segment that is not
 * currently queued in hardware.
 */
#define AXI_DMAC_SG_UNUSED 32U

struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[] __counted_by(num_sgs);
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

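	/*
	 * Reading TRANSFER_ID returns the ID the hardware assigns to the
	 * transfer queued next; it is remembered here so the matching bit in
	 * TRANSFER_DONE can later be used to detect this segment's completion.
	 */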
	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers, the descriptor has no
	 * completion callback, and it consists of only a single segment,
	 * enable hw cyclic mode to avoid unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
	    desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}

static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				 "Partial segment not found, id=%u, len=%u\n",
				 id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}

static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

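	/*
	 * Walk the submitted segments in order; completed_transfers carries
	 * one bit per hardware transfer ID, so a set bit for a segment's ID
	 * means that segment has finished.
	 */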
	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;
	desc->num_sgs = num_sgs;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	return desc;
}

static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/*
	 * Take care of alignment: round the segment size up to the next
	 * multiple of the length alignment (length_align_mask is the
	 * alignment minus one).
	 */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction,
			sg_dma_address(sg), 1, sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

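	/*
	 * With 2D support the hardware transfers xt->numf rows of
	 * sgl[0].size bytes each, advancing by the stride (row size plus
	 * inter-chunk gap) between rows. Without it the frame can only be
	 * linearized into a single 1D transfer when there are no gaps.
	 */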
	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};

static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
{
	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
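/*
 * A minimal, illustrative fragment of the bindings parsed below (the exact
 * addresses and values are assumptions for the example):
 *
 *	dma-controller@44a30000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		...
 *		adi,channels {
 *			#size-cells = <0>;
 *			#address-cells = <1>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-width = <64>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,destination-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_AXI_STREAM>;
 *			};
 *		};
 *	};
 */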
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
{
	struct device_node *of_channels, *of_chan;
	int ret;

	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	return 0;
}

static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int val, desc;

	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	if (desc == 0) {
		dev_err(dev, "DMA interface register reads zero\n");
		return -EFAULT;
	}

	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid source bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->src_type = val;

	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid destination bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->dest_type = val;

	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Source bus width is zero\n");
		return -EINVAL;
	}
	/* widths are stored in log2 */
	chan->src_width = 1 << val;

	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Destination bus width is zero\n");
		return -EINVAL;
	}
	chan->dest_width = 1 << val;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

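/*
 * Detect which optional features the core was instantiated with by writing a
 * register and reading the value back: bits belonging to unimplemented
 * features read back as zero, while the X_LENGTH register additionally
 * reveals the maximum transfer length and, on recent cores, the length
 * alignment requirement.
 */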
static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
{
	struct axi_dmac_chan *chan = &dmac->chan;

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct regmap *regmap;
	unsigned int version;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
	else
		ret = axi_dmac_parse_dt(&pdev->dev, dmac);

	if (ret < 0)
		goto err_clk_disable;

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = axi_dmac_detect_caps(dmac, version);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	if (of_dma_is_coherent(pdev->dev.of_node)) {
		ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);

		if (version < ADI_AXI_PCORE_VER(4, 4, 'a') ||
		    !AXI_DMAC_DST_COHERENT_GET(ret)) {
			dev_err(dmac->dma_dev.dev,
				"Coherent DMA not supported in hardware");
			ret = -EINVAL;
			goto err_clk_disable;
		}
	}

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
		&axi_dmac_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dmac->irq, dmac);
err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static void axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove_new = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");