// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx DMA/Bridge Subsystem
 *
 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
 * Copyright (C) 2022, Advanced Micro Devices, Inc.
 */

/*
 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
 * between Host memory and the DMA subsystem. It does this by operating on
 * 'descriptors' that contain information about the source, destination and
 * amount of data to transfer. These direct memory transfers can be in both
 * the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be
 * configured to have a single AXI4 Master interface shared by all channels
 * or one AXI4-Stream interface per enabled channel. Memory transfers are
 * specified on a per-channel basis in descriptor linked lists, which the DMA
 * fetches from host memory and processes. Events such as descriptor completion
 * and errors are signaled using interrupts. The core also provides up to 16
 * user interrupt wires that generate interrupts to the host.
 */
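
/*
 * A minimal, hypothetical dmaengine client sketch (not part of this
 * driver) showing how a consumer would typically drive an H2C channel;
 * the channel lookup string, scatterlist and callback names are
 * assumptions for illustration:
 *
 *      struct dma_slave_config cfg = {
 *              .direction = DMA_MEM_TO_DEV,
 *              .dst_addr = card_addr,  // card-side address, assumed
 *      };
 *      struct dma_async_tx_descriptor *tx;
 *      struct dma_chan *ch = dma_request_chan(dev, "h2c");
 *
 *      dmaengine_slave_config(ch, &cfg);
 *      tx = dmaengine_prep_slave_sg(ch, sgl, sg_len, DMA_MEM_TO_DEV,
 *                                   DMA_PREP_INTERRUPT);
 *      tx->callback = xfer_done_cb;
 *      dmaengine_submit(tx);
 *      dma_async_issue_pending(ch);
 */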

#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/regmap.h>
#include <linux/dmaengine.h>
#include <linux/dma/amd_xdma.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_xdma.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "../virt-dma.h"
#include "xdma-regs.h"

/* mmio regmap config for all XDMA registers */
static const struct regmap_config xdma_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
        .max_register = XDMA_REG_SPACE_LEN,
};

/**
 * struct xdma_desc_block - Descriptor block
 * @virt_addr: Virtual address of block start
 * @dma_addr: DMA address of block start
 */
struct xdma_desc_block {
        void *virt_addr;
        dma_addr_t dma_addr;
};

/**
 * struct xdma_chan - Driver specific DMA channel structure
 * @vchan: Virtual channel
 * @xdev_hdl: Pointer to DMA device structure
 * @base: Offset of channel registers
 * @desc_pool: Descriptor pool
 * @busy: Busy flag of the channel
 * @dir: Transferring direction of the channel
 * @cfg: Transferring config of the channel
 * @irq: IRQ assigned to the channel
 */
struct xdma_chan {
        struct virt_dma_chan vchan;
        void *xdev_hdl;
        u32 base;
        struct dma_pool *desc_pool;
        bool busy;
        enum dma_transfer_direction dir;
        struct dma_slave_config cfg;
        u32 irq;
};

/**
 * struct xdma_desc - DMA desc structure
 * @vdesc: Virtual DMA descriptor
 * @chan: DMA channel pointer
 * @dir: Transferring direction of the request
 * @desc_blocks: Hardware descriptor blocks
 * @dblk_num: Number of hardware descriptor blocks
 * @desc_num: Number of hardware descriptors
 * @completed_desc_num: Completed hardware descriptors
 * @cyclic: Cyclic transfer vs. scatter-gather
 * @interleaved_dma: Interleaved DMA transfer
 * @periods: Number of periods in the cyclic transfer
 * @period_size: Size of a period in bytes in cyclic transfers
 * @frames_left: Number of frames left in interleaved DMA transfer
 * @error: tx error flag
 */
struct xdma_desc {
        struct virt_dma_desc vdesc;
        struct xdma_chan *chan;
        enum dma_transfer_direction dir;
        struct xdma_desc_block *desc_blocks;
        u32 dblk_num;
        u32 desc_num;
        u32 completed_desc_num;
        bool cyclic;
        bool interleaved_dma;
        u32 periods;
        u32 period_size;
        u32 frames_left;
        bool error;
};

#define XDMA_DEV_STATUS_REG_DMA         BIT(0)
#define XDMA_DEV_STATUS_INIT_MSIX       BIT(1)

/**
 * struct xdma_device - DMA device structure
 * @pdev: Platform device pointer
 * @dma_dev: DMA device structure
 * @rmap: MMIO regmap for DMA registers
 * @h2c_chans: Host to Card channels
 * @c2h_chans: Card to Host channels
 * @h2c_chan_num: Number of H2C channels
 * @c2h_chan_num: Number of C2H channels
 * @irq_start: Start IRQ assigned to device
 * @irq_num: Number of IRQs assigned to device
 * @status: Initialization status
 */
struct xdma_device {
        struct platform_device *pdev;
        struct dma_device dma_dev;
        struct regmap *rmap;
        struct xdma_chan *h2c_chans;
        struct xdma_chan *c2h_chans;
        u32 h2c_chan_num;
        u32 c2h_chan_num;
        u32 irq_start;
        u32 irq_num;
        u32 status;
};

#define xdma_err(xdev, fmt, args...)                                    \
        dev_err(&(xdev)->pdev->dev, fmt, ##args)
#define XDMA_CHAN_NUM(_xd) ({                                           \
        typeof(_xd) (xd) = (_xd);                                       \
        ((xd)->h2c_chan_num + (xd)->c2h_chan_num); })

/* Get the last desc in a desc block */
static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
{
        return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
}

/**
 * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
{
        struct xdma_desc_block *block;
        u32 last_blk_desc, desc_control;
        struct xdma_hw_desc *desc;
        int i;

        desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
        for (i = 1; i < sw_desc->dblk_num; i++) {
                block = &sw_desc->desc_blocks[i - 1];
                desc = xdma_blk_last_desc(block);

                if (!(i & XDMA_DESC_BLOCK_MASK)) {
                        desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
                        continue;
                }
                desc->control = cpu_to_le32(desc_control);
                desc->next_desc = cpu_to_le64(block[1].dma_addr);
        }

        /* update the last block */
        last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
        if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
                block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
                desc = xdma_blk_last_desc(block);
                desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
                desc->control = cpu_to_le32(desc_control);
        }

        block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
        desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
        desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}

/**
 * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
{
        struct xdma_desc_block *block;
        struct xdma_hw_desc *desc;
        int i;

        block = sw_desc->desc_blocks;
        for (i = 0; i < sw_desc->desc_num - 1; i++) {
                desc = block->virt_addr + i * XDMA_DESC_SIZE;
                desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
        }
        desc = block->virt_addr + i * XDMA_DESC_SIZE;
        desc->next_desc = cpu_to_le64(block->dma_addr);
}
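
/*
 * For example, a cyclic transfer with desc_num == 3 ends up with the
 * ->next_desc pointers forming a ring inside the first block:
 * desc0 -> desc1 -> desc2 -> desc0, so the engine keeps looping over the
 * periods until it is stopped.
 */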

static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct xdma_chan, vchan.chan);
}

static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
{
        return container_of(vdesc, struct xdma_desc, vdesc);
}

/**
 * xdma_channel_init - Initialize DMA channel registers
 * @chan: DMA channel pointer
 */
static int xdma_channel_init(struct xdma_chan *chan)
{
        struct xdma_device *xdev = chan->xdev_hdl;
        int ret;

        ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
                           CHAN_CTRL_NON_INCR_ADDR);
        if (ret)
                return ret;

        ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
                           CHAN_IM_ALL);
        if (ret)
                return ret;

        return 0;
}

/**
 * xdma_free_desc - Free descriptor
 * @vdesc: Virtual DMA descriptor
 */
static void xdma_free_desc(struct virt_dma_desc *vdesc)
{
        struct xdma_desc *sw_desc;
        int i;

        sw_desc = to_xdma_desc(vdesc);
        for (i = 0; i < sw_desc->dblk_num; i++) {
                if (!sw_desc->desc_blocks[i].virt_addr)
                        break;
                dma_pool_free(sw_desc->chan->desc_pool,
                              sw_desc->desc_blocks[i].virt_addr,
                              sw_desc->desc_blocks[i].dma_addr);
        }
        kfree(sw_desc->desc_blocks);
        kfree(sw_desc);
}

/**
 * xdma_alloc_desc - Allocate descriptor
 * @chan: DMA channel pointer
 * @desc_num: Number of hardware descriptors
 * @cyclic: Whether this is a cyclic transfer
 */
static struct xdma_desc *
xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
{
        struct xdma_desc *sw_desc;
        struct xdma_hw_desc *desc;
        dma_addr_t dma_addr;
        u32 dblk_num;
        u32 control;
        void *addr;
        int i, j;

        sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
        if (!sw_desc)
                return NULL;

        sw_desc->chan = chan;
        sw_desc->desc_num = desc_num;
        sw_desc->cyclic = cyclic;
        sw_desc->error = false;
        dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
        sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
                                       GFP_NOWAIT);
        if (!sw_desc->desc_blocks)
                goto failed;

        if (cyclic)
                control = XDMA_DESC_CONTROL_CYCLIC;
        else
                control = XDMA_DESC_CONTROL(1, 0);

        sw_desc->dblk_num = dblk_num;
        for (i = 0; i < sw_desc->dblk_num; i++) {
                addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
                if (!addr)
                        goto failed;

                sw_desc->desc_blocks[i].virt_addr = addr;
                sw_desc->desc_blocks[i].dma_addr = dma_addr;
                for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
                        desc[j].control = cpu_to_le32(control);
        }

        if (cyclic)
                xdma_link_cyclic_desc_blocks(sw_desc);
        else
                xdma_link_sg_desc_blocks(sw_desc);

        return sw_desc;

failed:
        xdma_free_desc(&sw_desc->vdesc);
        return NULL;
}

/**
 * xdma_xfer_start - Start DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_start(struct xdma_chan *xchan)
{
        struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
        struct xdma_device *xdev = xchan->xdev_hdl;
        struct xdma_desc_block *block;
        u32 val, completed_blocks;
        struct xdma_desc *desc;
        int ret;

        /*
         * Return early if there is no submitted descriptor or the channel
         * is busy. The vchan lock must be held by the caller.
         */
        if (!vd || xchan->busy)
                return -EINVAL;

        /* clear run stop bit to get ready for transfer */
        ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
                           CHAN_CTRL_RUN_STOP);
        if (ret)
                return ret;

        desc = to_xdma_desc(vd);
        if (desc->dir != xchan->dir) {
                xdma_err(xdev, "incorrect request direction");
                return -EINVAL;
        }

        /* set DMA engine to the first descriptor block */
        completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
        block = &desc->desc_blocks[completed_blocks];
        val = lower_32_bits(block->dma_addr);
        ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
        if (ret)
                return ret;

        val = upper_32_bits(block->dma_addr);
        ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
        if (ret)
                return ret;

        if (completed_blocks + 1 == desc->dblk_num)
                val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
        else
                val = XDMA_DESC_ADJACENT - 1;
        ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
        if (ret)
                return ret;

        /* kick off DMA transfer */
        ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
                           CHAN_CTRL_START);
        if (ret)
                return ret;

        xchan->busy = true;

        return 0;
}

/**
 * xdma_xfer_stop - Stop DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
        int ret;
        u32 val;
        struct xdma_device *xdev = xchan->xdev_hdl;

        /* clear run stop bit to prevent any further auto-triggering */
        ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
                           CHAN_CTRL_RUN_STOP);
        if (ret)
                return ret;

        /* Clear the channel status register */
        ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
        if (ret)
                return ret;

        return 0;
}

/**
 * xdma_alloc_channels - Detect and allocate DMA channels
 * @xdev: DMA device pointer
 * @dir: Channel direction
 */
static int xdma_alloc_channels(struct xdma_device *xdev,
                               enum dma_transfer_direction dir)
{
        struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
        struct xdma_chan **chans, *xchan;
        u32 base, identifier, target;
        u32 *chan_num;
        int i, j, ret;

        if (dir == DMA_MEM_TO_DEV) {
                base = XDMA_CHAN_H2C_OFFSET;
                target = XDMA_CHAN_H2C_TARGET;
                chans = &xdev->h2c_chans;
                chan_num = &xdev->h2c_chan_num;
        } else if (dir == DMA_DEV_TO_MEM) {
                base = XDMA_CHAN_C2H_OFFSET;
                target = XDMA_CHAN_C2H_TARGET;
                chans = &xdev->c2h_chans;
                chan_num = &xdev->c2h_chan_num;
        } else {
                xdma_err(xdev, "invalid direction specified");
                return -EINVAL;
        }

        /* detect number of available DMA channels */
        for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
                ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
                                  &identifier);
                if (ret)
                        return ret;

                /* check if this is an available DMA channel */
                if (XDMA_CHAN_CHECK_TARGET(identifier, target))
                        (*chan_num)++;
        }

        if (!*chan_num) {
                xdma_err(xdev, "failed to detect any channel");
                return -EINVAL;
        }

        *chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
                              GFP_KERNEL);
        if (!*chans)
                return -ENOMEM;

        for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
                ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
                                  &identifier);
                if (ret)
                        return ret;

                if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
                        continue;

                if (j == *chan_num) {
                        xdma_err(xdev, "invalid channel number");
                        return -EIO;
                }

                /* init channel structure and hardware */
                xchan = &(*chans)[j];
                xchan->xdev_hdl = xdev;
                xchan->base = base + i * XDMA_CHAN_STRIDE;
                xchan->dir = dir;

                ret = xdma_channel_init(xchan);
                if (ret)
                        return ret;
                xchan->vchan.desc_free = xdma_free_desc;
                vchan_init(&xchan->vchan, &xdev->dma_dev);

                j++;
        }

        dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
                 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");

        return 0;
}

/**
 * xdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void xdma_issue_pending(struct dma_chan *chan)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
        if (vchan_issue_pending(&xdma_chan->vchan))
                xdma_xfer_start(xdma_chan);
        spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
}

/**
 * xdma_terminate_all - Terminate all transactions
 * @chan: DMA channel pointer
 */
static int xdma_terminate_all(struct dma_chan *chan)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
        struct virt_dma_desc *vd;
        unsigned long flags;
        LIST_HEAD(head);

        xdma_xfer_stop(xdma_chan);

        spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

        xdma_chan->busy = false;
        vd = vchan_next_desc(&xdma_chan->vchan);
        if (vd) {
                list_del(&vd->node);
                dma_cookie_complete(&vd->tx);
                vchan_terminate_vdesc(vd);
        }
        vchan_get_all_descriptors(&xdma_chan->vchan, &head);
        list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);

        spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

        return 0;
}

/**
 * xdma_synchronize - Synchronize terminated transactions
 * @chan: DMA channel pointer
 */
static void xdma_synchronize(struct dma_chan *chan)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);

        vchan_synchronize(&xdma_chan->vchan);
}

/**
 * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
 * @sw_desc: tx descriptor state container
 * @src_addr: Value for the ->src_addr field of the first descriptor
 * @dst_addr: Value for the ->dst_addr field of the first descriptor
 * @size: Total size of a contiguous memory block
 * @filled_descs_num: Number of hardware descriptors already filled for this sw_desc
 */
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
                                  u64 dst_addr, u32 size, u32 filled_descs_num)
{
        u32 left = size, len, desc_num = filled_descs_num;
        struct xdma_desc_block *dblk;
        struct xdma_hw_desc *desc;

        dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
        desc = dblk->virt_addr;
        desc += desc_num & XDMA_DESC_ADJACENT_MASK;
        do {
                len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
                /* set hardware descriptor */
                desc->bytes = cpu_to_le32(len);
                desc->src_addr = cpu_to_le64(src_addr);
                desc->dst_addr = cpu_to_le64(dst_addr);
                if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
                        desc = (++dblk)->virt_addr;
                else
                        desc++;

                src_addr += len;
                dst_addr += len;
                left -= len;
        } while (left);

        return desc_num - filled_descs_num;
}
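
/*
 * Worked example (a sketch, with sizes chosen purely for illustration):
 * if XDMA_DESC_BLEN_MAX were 1 MiB, a 2.5 MiB contiguous block starting
 * at filled_descs_num == 0 would be split into three descriptors of
 * 1 MiB, 1 MiB and 0.5 MiB, with the source/destination addresses
 * advancing by the chunk length at each step; the function would return
 * 3. Whenever desc_num crosses a multiple of XDMA_DESC_ADJACENT, the
 * walk moves on to the next descriptor block.
 */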

/**
 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
 * @chan: DMA channel pointer
 * @sgl: Transfer scatter gather list
 * @sg_len: Length of scatter gather list
 * @dir: Transfer direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 */
static struct dma_async_tx_descriptor *
xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
                    unsigned int sg_len, enum dma_transfer_direction dir,
                    unsigned long flags, void *context)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
        struct dma_async_tx_descriptor *tx_desc;
        struct xdma_desc *sw_desc;
        u32 desc_num = 0, i;
        u64 addr, dev_addr, *src, *dst;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, sg_len, i)
                desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);

        sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
        if (!sw_desc)
                return NULL;
        sw_desc->dir = dir;
        sw_desc->cyclic = false;
        sw_desc->interleaved_dma = false;

        if (dir == DMA_MEM_TO_DEV) {
                dev_addr = xdma_chan->cfg.dst_addr;
                src = &addr;
                dst = &dev_addr;
        } else {
                dev_addr = xdma_chan->cfg.src_addr;
                src = &dev_addr;
                dst = &addr;
        }

        desc_num = 0;
        for_each_sg(sgl, sg, sg_len, i) {
                addr = sg_dma_address(sg);
                desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
                dev_addr += sg_dma_len(sg);
        }

        tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
        if (!tx_desc)
                goto failed;

        return tx_desc;

failed:
        xdma_free_desc(&sw_desc->vdesc);

        return NULL;
}

/**
 * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
 * @chan: DMA channel pointer
 * @address: Device DMA address to access
 * @size: Total length to transfer
 * @period_size: Period size to use for each transfer
 * @dir: Transfer direction
 * @flags: Transfer ack flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
                     size_t size, size_t period_size,
                     enum dma_transfer_direction dir,
                     unsigned long flags)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
        struct xdma_device *xdev = xdma_chan->xdev_hdl;
        unsigned int periods = size / period_size;
        struct dma_async_tx_descriptor *tx_desc;
        struct xdma_desc *sw_desc;
        u64 addr, dev_addr, *src, *dst;
        u32 desc_num;
        unsigned int i;

        /*
         * Simplify the whole logic by preventing an abnormally high number of
         * periods and period size.
         */
        if (period_size > XDMA_DESC_BLEN_MAX) {
                xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
                return NULL;
        }

        if (periods > XDMA_DESC_ADJACENT) {
                xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
                return NULL;
        }

        sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
        if (!sw_desc)
                return NULL;

        sw_desc->periods = periods;
        sw_desc->period_size = period_size;
        sw_desc->dir = dir;
        sw_desc->interleaved_dma = false;

        addr = address;
        if (dir == DMA_MEM_TO_DEV) {
                dev_addr = xdma_chan->cfg.dst_addr;
                src = &addr;
                dst = &dev_addr;
        } else {
                dev_addr = xdma_chan->cfg.src_addr;
                src = &dev_addr;
                dst = &addr;
        }

        desc_num = 0;
        for (i = 0; i < periods; i++) {
                desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
                /* advance the memory-side address by one period per iteration */
                addr += period_size;
        }

        tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
        if (!tx_desc)
                goto failed;

        return tx_desc;

failed:
        xdma_free_desc(&sw_desc->vdesc);

        return NULL;
}
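
/*
 * A minimal client sketch for the cyclic path (hypothetical names; the
 * buffer is assumed to be DMA-mapped already by the caller):
 *
 *      struct dma_async_tx_descriptor *tx;
 *
 *      tx = dmaengine_prep_dma_cyclic(ch, buf_dma, buf_len, period_len,
 *                                     DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *      tx->callback = period_done_cb;  // invoked once per completed period
 *      dmaengine_submit(tx);
 *      dma_async_issue_pending(ch);
 */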

/**
 * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
 * @chan: DMA channel
 * @xt: DMA transfer template
 * @flags: tx flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_interleaved_dma(struct dma_chan *chan,
                          struct dma_interleaved_template *xt,
                          unsigned long flags)
{
        int i;
        u32 desc_num = 0, period_size = 0;
        struct dma_async_tx_descriptor *tx_desc;
        struct xdma_chan *xchan = to_xdma_chan(chan);
        struct xdma_desc *sw_desc;
        u64 src_addr, dst_addr;

        for (i = 0; i < xt->frame_size; ++i)
                desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);

        sw_desc = xdma_alloc_desc(xchan, desc_num, false);
        if (!sw_desc)
                return NULL;
        sw_desc->dir = xt->dir;
        sw_desc->interleaved_dma = true;
        sw_desc->cyclic = flags & DMA_PREP_REPEAT;
        sw_desc->frames_left = xt->numf;
        sw_desc->periods = xt->numf;

        desc_num = 0;
        src_addr = xt->src_start;
        dst_addr = xt->dst_start;
        for (i = 0; i < xt->frame_size; ++i) {
                desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
                src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
                                                  xt->sgl[i].size : 0);
                dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
                                                  xt->sgl[i].size : 0);
                period_size += xt->sgl[i].size;
        }
        sw_desc->period_size = period_size;

        tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
        if (tx_desc)
                return tx_desc;

        xdma_free_desc(&sw_desc->vdesc);
        return NULL;
}

/**
 * xdma_device_config - Configure the DMA channel
 * @chan: DMA channel
 * @cfg: channel configuration
 */
static int xdma_device_config(struct dma_chan *chan,
                              struct dma_slave_config *cfg)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);

        memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));

        return 0;
}

/**
 * xdma_free_chan_resources - Free channel resources
 * @chan: DMA channel
 */
static void xdma_free_chan_resources(struct dma_chan *chan)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);

        vchan_free_chan_resources(&xdma_chan->vchan);
        dma_pool_destroy(xdma_chan->desc_pool);
        xdma_chan->desc_pool = NULL;
}

/**
 * xdma_alloc_chan_resources - Allocate channel resources
 * @chan: DMA channel
 */
static int xdma_alloc_chan_resources(struct dma_chan *chan)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
        struct xdma_device *xdev = xdma_chan->xdev_hdl;
        struct device *dev = xdev->dma_dev.dev;

        while (dev && !dev_is_pci(dev))
                dev = dev->parent;
        if (!dev) {
                xdma_err(xdev, "unable to find pci device");
                return -EINVAL;
        }

        xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
                                               XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
        if (!xdma_chan->desc_pool) {
                xdma_err(xdev, "unable to allocate descriptor pool");
                return -ENOMEM;
        }

        return 0;
}

static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                      struct dma_tx_state *state)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
        struct xdma_desc *desc = NULL;
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;
        unsigned int period_idx;
        u32 residue = 0;

        ret = dma_cookie_status(chan, cookie, state);
        if (ret == DMA_COMPLETE)
                return ret;

        spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

        vd = vchan_find_desc(&xdma_chan->vchan, cookie);
        if (!vd)
                goto out;

        desc = to_xdma_desc(vd);
        if (desc->error) {
                ret = DMA_ERROR;
        } else if (desc->cyclic) {
                period_idx = desc->completed_desc_num % desc->periods;
                residue = (desc->periods - period_idx) * desc->period_size;
                dma_set_residue(state, residue);
        }
out:
        spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

        return ret;
}

/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
        struct xdma_chan *xchan = dev_id;
        u32 complete_desc_num = 0;
        struct xdma_device *xdev = xchan->xdev_hdl;
        struct virt_dma_desc *vd, *next_vd;
        struct xdma_desc *desc;
        int ret;
        u32 st;
        bool repeat_tx;

        spin_lock(&xchan->vchan.lock);

        /* get submitted request */
        vd = vchan_next_desc(&xchan->vchan);
        if (!vd)
                goto out;

        /* Clear-on-read the status register */
        ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
        if (ret)
                goto out;

        desc = to_xdma_desc(vd);

        st &= XDMA_CHAN_STATUS_MASK;
        if ((st & XDMA_CHAN_ERROR_MASK) ||
            !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
                desc->error = true;
                xdma_err(xdev, "channel error, status register value: 0x%x", st);
                goto out;
        }

        ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
                          &complete_desc_num);
        if (ret)
                goto out;

        if (desc->interleaved_dma) {
                xchan->busy = false;
                desc->completed_desc_num += complete_desc_num;
                if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
                        xdma_xfer_start(xchan);
                        goto out;
                }

                /* last desc of any frame */
                desc->frames_left--;
                if (desc->frames_left)
                        goto out;

                /* last desc of the last frame */
                repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
                next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
                if (next_vd)
                        repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
                if (repeat_tx) {
                        desc->frames_left = desc->periods;
                        desc->completed_desc_num = 0;
                        vchan_cyclic_callback(vd);
                } else {
                        list_del(&vd->node);
                        vchan_cookie_complete(vd);
                }
                /* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
                xdma_xfer_start(xchan);
        } else if (!desc->cyclic) {
                xchan->busy = false;
                desc->completed_desc_num += complete_desc_num;

                /* if all data blocks are transferred, remove and complete the request */
                if (desc->completed_desc_num == desc->desc_num) {
                        list_del(&vd->node);
                        vchan_cookie_complete(vd);
                        goto out;
                }

                if (desc->completed_desc_num > desc->desc_num ||
                    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
                        goto out;

                /* transfer the rest of data */
                xdma_xfer_start(xchan);
        } else {
                desc->completed_desc_num = complete_desc_num;
                vchan_cyclic_callback(vd);
        }

out:
        spin_unlock(&xchan->vchan.lock);
        return IRQ_HANDLED;
}

/**
 * xdma_irq_fini - Uninitialize IRQ
 * @xdev: DMA device pointer
 */
static void xdma_irq_fini(struct xdma_device *xdev)
{
        int i;

        /* disable interrupt */
        regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);

        /* free irq handler */
        for (i = 0; i < xdev->h2c_chan_num; i++)
                free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

        for (i = 0; i < xdev->c2h_chan_num; i++)
                free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}

/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQ numbers
 * @irq_num: Number of IRQs
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
                               u32 irq_start, u32 irq_num)
{
        u32 shift, i, val = 0;
        int ret;

        /* Each IRQ register is 32 bit and contains 4 IRQs */
        while (irq_num > 0) {
                for (i = 0; i < 4; i++) {
                        shift = XDMA_IRQ_VEC_SHIFT * i;
                        val |= irq_start << shift;
                        irq_start++;
                        irq_num--;
                        if (!irq_num)
                                break;
                }

                /* write IRQ register */
                ret = regmap_write(xdev->rmap, vec_tbl_start, val);
                if (ret)
                        return ret;
                vec_tbl_start += sizeof(u32);
                val = 0;
        }

        return 0;
}
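
/*
 * Worked example of the packing above (assuming XDMA_IRQ_VEC_SHIFT is 8,
 * i.e. one byte per vector, which matches the 4-vectors-per-register
 * comment): irq_start = 3, irq_num = 6 results in two register writes,
 * 0x06050403 at vec_tbl_start and 0x00000807 at vec_tbl_start + 4.
 */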

/**
 * xdma_irq_init - initialize IRQs
 * @xdev: DMA device pointer
 */
static int xdma_irq_init(struct xdma_device *xdev)
{
        u32 irq = xdev->irq_start;
        u32 user_irq_start;
        int i, j, ret;

        /* return failure if there are not enough IRQs */
        if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
                xdma_err(xdev, "not enough irq");
                return -EINVAL;
        }

        /* setup H2C interrupt handler */
        for (i = 0; i < xdev->h2c_chan_num; i++) {
                ret = request_irq(irq, xdma_channel_isr, 0,
                                  "xdma-h2c-channel", &xdev->h2c_chans[i]);
                if (ret) {
                        xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
                                 i, irq, ret);
                        goto failed_init_h2c;
                }
                xdev->h2c_chans[i].irq = irq;
                irq++;
        }

        /* setup C2H interrupt handler */
        for (j = 0; j < xdev->c2h_chan_num; j++) {
                ret = request_irq(irq, xdma_channel_isr, 0,
                                  "xdma-c2h-channel", &xdev->c2h_chans[j]);
                if (ret) {
                        xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
                                 j, irq, ret);
                        goto failed_init_c2h;
                }
                xdev->c2h_chans[j].irq = irq;
                irq++;
        }

        /* config hardware IRQ registers */
        ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
                                  XDMA_CHAN_NUM(xdev));
        if (ret) {
                xdma_err(xdev, "failed to set channel vectors: %d", ret);
                goto failed_init_c2h;
        }

        /* config user IRQ registers if needed */
        user_irq_start = XDMA_CHAN_NUM(xdev);
        if (xdev->irq_num > user_irq_start) {
                ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
                                          user_irq_start,
                                          xdev->irq_num - user_irq_start);
                if (ret) {
                        xdma_err(xdev, "failed to set user vectors: %d", ret);
                        goto failed_init_c2h;
                }
        }

        /* enable interrupt */
        ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
        if (ret)
                goto failed_init_c2h;

        return 0;

failed_init_c2h:
        while (j--)
                free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
        while (i--)
                free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

        return ret;
}

static bool xdma_filter_fn(struct dma_chan *chan, void *param)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
        struct xdma_chan_info *chan_info = param;

        return chan_info->dir == xdma_chan->dir;
}

/**
 * xdma_disable_user_irq - Disable user interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
{
        struct xdma_device *xdev = platform_get_drvdata(pdev);
        u32 index;

        index = irq_num - xdev->irq_start;
        if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
                xdma_err(xdev, "invalid user irq number");
                return;
        }
        index -= XDMA_CHAN_NUM(xdev);

        regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
}
EXPORT_SYMBOL(xdma_disable_user_irq);

/**
 * xdma_enable_user_irq - Enable user logic interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
{
        struct xdma_device *xdev = platform_get_drvdata(pdev);
        u32 index;
        int ret;

        index = irq_num - xdev->irq_start;
        if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
                xdma_err(xdev, "invalid user irq number");
                return -EINVAL;
        }
        index -= XDMA_CHAN_NUM(xdev);

        ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
        if (ret)
                return ret;

        return 0;
}
EXPORT_SYMBOL(xdma_enable_user_irq);

/**
 * xdma_get_user_irq - Get system IRQ number
 * @pdev: Pointer to the platform_device structure
 * @user_irq_index: User logic IRQ wire index
 *
 * Return: The system IRQ number allocated for the given wire index.
 */
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
{
        struct xdma_device *xdev = platform_get_drvdata(pdev);

        if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
                xdma_err(xdev, "invalid user irq index");
                return -EINVAL;
        }

        return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
}
EXPORT_SYMBOL(xdma_get_user_irq);
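
/*
 * A hypothetical consumer sketch for the user IRQ API above; the wire
 * index, handler and cookie names are assumptions for illustration:
 *
 *      int virq = xdma_get_user_irq(xdma_pdev, 0);
 *
 *      if (virq < 0)
 *              return virq;
 *      ret = request_irq(virq, user_event_isr, 0, "xdma-user", priv);
 *      if (!ret)
 *              ret = xdma_enable_user_irq(xdma_pdev, virq);
 */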

/**
 * xdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static void xdma_remove(struct platform_device *pdev)
{
        struct xdma_device *xdev = platform_get_drvdata(pdev);

        if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
                xdma_irq_fini(xdev);

        if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
                dma_async_device_unregister(&xdev->dma_dev);
}

/**
 * xdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_probe(struct platform_device *pdev)
{
        struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
        struct xdma_device *xdev;
        void __iomem *reg_base;
        struct resource *res;
        int ret = -ENODEV;

        if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
                dev_err(&pdev->dev, "invalid max dma channels %d",
                        pdata->max_dma_channels);
                return -EINVAL;
        }

        xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
        if (!xdev)
                return -ENOMEM;

        platform_set_drvdata(pdev, xdev);
        xdev->pdev = pdev;

        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
                xdma_err(xdev, "failed to get irq resource");
                goto failed;
        }
        xdev->irq_start = res->start;
        xdev->irq_num = resource_size(res);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                xdma_err(xdev, "failed to get io resource");
                goto failed;
        }

        reg_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(reg_base)) {
                xdma_err(xdev, "ioremap failed");
                goto failed;
        }

        xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
                                           &xdma_regmap_config);
        if (IS_ERR(xdev->rmap)) {
                /* devm_regmap_init_mmio() returns an ERR_PTR(), never NULL */
                ret = PTR_ERR(xdev->rmap);
                xdma_err(xdev, "config regmap failed: %d", ret);
                goto failed;
        }
        INIT_LIST_HEAD(&xdev->dma_dev.channels);

        ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
        if (ret) {
                xdma_err(xdev, "config H2C channels failed: %d", ret);
                goto failed;
        }

        ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
        if (ret) {
                xdma_err(xdev, "config C2H channels failed: %d", ret);
                goto failed;
        }

        dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
        dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
        dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
        dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
        dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
        dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);

        xdev->dma_dev.dev = &pdev->dev;
        xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
        xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
        xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
        xdev->dma_dev.device_tx_status = xdma_tx_status;
        xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
        xdev->dma_dev.device_config = xdma_device_config;
        xdev->dma_dev.device_issue_pending = xdma_issue_pending;
        xdev->dma_dev.device_terminate_all = xdma_terminate_all;
        xdev->dma_dev.device_synchronize = xdma_synchronize;
        xdev->dma_dev.filter.map = pdata->device_map;
        xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
        xdev->dma_dev.filter.fn = xdma_filter_fn;
        xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
        xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;

        ret = dma_async_device_register(&xdev->dma_dev);
        if (ret) {
                xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
                goto failed;
        }
        xdev->status |= XDMA_DEV_STATUS_REG_DMA;

        ret = xdma_irq_init(xdev);
        if (ret) {
                xdma_err(xdev, "failed to init msix: %d", ret);
                goto failed;
        }
        xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;

        return 0;

failed:
        xdma_remove(pdev);

        return ret;
}

static const struct platform_device_id xdma_id_table[] = {
        { "xdma", 0},
        { },
};

static struct platform_driver xdma_driver = {
        .driver = {
                .name = "xdma",
        },
        .id_table = xdma_id_table,
        .probe = xdma_probe,
        .remove_new = xdma_remove,
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");