// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx ZynqMP DPDMA Engine driver
 *
 * Copyright (C) 2015 - 2020 Xilinx, Inc.
 *
 * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma/xilinx_dpdma.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* DPDMA registers */
#define XILINX_DPDMA_ERR_CTRL				0x000
#define XILINX_DPDMA_ISR				0x004
#define XILINX_DPDMA_IMR				0x008
#define XILINX_DPDMA_IEN				0x00c
#define XILINX_DPDMA_IDS				0x010
#define XILINX_DPDMA_INTR_DESC_DONE(n)			BIT((n) + 0)
#define XILINX_DPDMA_INTR_DESC_DONE_MASK		GENMASK(5, 0)
#define XILINX_DPDMA_INTR_NO_OSTAND(n)			BIT((n) + 6)
#define XILINX_DPDMA_INTR_NO_OSTAND_MASK		GENMASK(11, 6)
#define XILINX_DPDMA_INTR_AXI_ERR(n)			BIT((n) + 12)
#define XILINX_DPDMA_INTR_AXI_ERR_MASK			GENMASK(17, 12)
#define XILINX_DPDMA_INTR_DESC_ERR(n)			BIT((n) + 18)
#define XILINX_DPDMA_INTR_DESC_ERR_MASK			GENMASK(23, 18)
#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL		BIT(24)
#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL		BIT(25)
#define XILINX_DPDMA_INTR_AXI_4K_CROSS			BIT(26)
#define XILINX_DPDMA_INTR_VSYNC				BIT(27)
#define XILINX_DPDMA_INTR_CHAN_ERR_MASK			0x00041000
#define XILINX_DPDMA_INTR_CHAN_ERR			0x00fff000
#define XILINX_DPDMA_INTR_GLOBAL_ERR			0x07000000
#define XILINX_DPDMA_INTR_ERR_ALL			0x07fff000
#define XILINX_DPDMA_INTR_CHAN_MASK			0x00041041
#define XILINX_DPDMA_INTR_GLOBAL_MASK			0x0f000000
#define XILINX_DPDMA_INTR_ALL				0x0fffffff
#define XILINX_DPDMA_EISR				0x014
#define XILINX_DPDMA_EIMR				0x018
#define XILINX_DPDMA_EIEN				0x01c
#define XILINX_DPDMA_EIDS				0x020
#define XILINX_DPDMA_EINTR_INV_APB			BIT(0)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR(n)		BIT((n) + 1)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK		GENMASK(6, 1)
#define XILINX_DPDMA_EINTR_PRE_ERR(n)			BIT((n) + 7)
#define XILINX_DPDMA_EINTR_PRE_ERR_MASK			GENMASK(12, 7)
#define XILINX_DPDMA_EINTR_CRC_ERR(n)			BIT((n) + 13)
#define XILINX_DPDMA_EINTR_CRC_ERR_MASK			GENMASK(18, 13)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR(n)		BIT((n) + 19)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK		GENMASK(24, 19)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n)		BIT((n) + 25)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK		GENMASK(30, 25)
#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL		BIT(31)
#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK		0x02082082
#define XILINX_DPDMA_EINTR_CHAN_ERR			0x7ffffffe
#define XILINX_DPDMA_EINTR_GLOBAL_ERR			0x80000001
#define XILINX_DPDMA_EINTR_ALL				0xffffffff
#define XILINX_DPDMA_CNTL				0x100
#define XILINX_DPDMA_GBL				0x104
#define XILINX_DPDMA_GBL_TRIG_MASK(n)			((n) << 0)
#define XILINX_DPDMA_GBL_RETRIG_MASK(n)			((n) << 6)
#define XILINX_DPDMA_ALC0_CNTL				0x108
#define XILINX_DPDMA_ALC0_STATUS			0x10c
#define XILINX_DPDMA_ALC0_MAX				0x110
#define XILINX_DPDMA_ALC0_MIN				0x114
#define XILINX_DPDMA_ALC0_ACC				0x118
#define XILINX_DPDMA_ALC0_ACC_TRAN			0x11c
#define XILINX_DPDMA_ALC1_CNTL				0x120
#define XILINX_DPDMA_ALC1_STATUS			0x124
#define XILINX_DPDMA_ALC1_MAX				0x128
#define XILINX_DPDMA_ALC1_MIN				0x12c
#define XILINX_DPDMA_ALC1_ACC				0x130
#define XILINX_DPDMA_ALC1_ACC_TRAN			0x134

/* Channel register */
#define XILINX_DPDMA_CH_BASE				0x200
#define XILINX_DPDMA_CH_OFFSET				0x100
#define XILINX_DPDMA_CH_DESC_START_ADDRE		0x000
#define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK		GENMASK(15, 0)
#define XILINX_DPDMA_CH_DESC_START_ADDR			0x004
#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE			0x008
#define XILINX_DPDMA_CH_DESC_NEXT_ADDR			0x00c
#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE			0x010
#define XILINX_DPDMA_CH_PYLD_CUR_ADDR			0x014
#define XILINX_DPDMA_CH_CNTL				0x018
#define XILINX_DPDMA_CH_CNTL_ENABLE			BIT(0)
#define XILINX_DPDMA_CH_CNTL_PAUSE			BIT(1)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK		GENMASK(5, 2)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK		GENMASK(9, 6)
#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK		GENMASK(13, 10)
#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS		11
#define XILINX_DPDMA_CH_STATUS				0x01c
#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK		GENMASK(24, 21)
#define XILINX_DPDMA_CH_VDO				0x020
#define XILINX_DPDMA_CH_PYLD_SZ				0x024
#define XILINX_DPDMA_CH_DESC_ID				0x028
#define XILINX_DPDMA_CH_DESC_ID_MASK			GENMASK(15, 0)

/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE		0xa5
#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR		BIT(8)
#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE		BIT(9)
#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE		BIT(10)
#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE		BIT(18)
#define XILINX_DPDMA_DESC_CONTROL_LAST			BIT(19)
#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC		BIT(20)
#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME		BIT(21)
#define XILINX_DPDMA_DESC_ID_MASK			GENMASK(15, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK	GENMASK(17, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK	GENMASK(31, 18)
#define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK	GENMASK(15, 0)
#define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK	GENMASK(31, 16)

#define XILINX_DPDMA_ALIGN_BYTES			256
#define XILINX_DPDMA_LINESIZE_ALIGN_BITS		128

#define XILINX_DPDMA_NUM_CHAN				6

struct xilinx_dpdma_chan;

/**
 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
 * @control: control configuration field
 * @desc_id: descriptor ID
 * @xfer_size: transfer size
 * @hsize_stride: horizontal size and stride
 * @timestamp_lsb: LSB of time stamp
 * @timestamp_msb: MSB of time stamp
 * @addr_ext: upper 16 bits of the 48 bit addresses (next_desc and src_addr)
 * @next_desc: next descriptor 32 bit address
 * @src_addr: payload source address (1st page, 32 LSB)
 * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
 * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
 * @src_addr2: payload source address (2nd page, 32 LSB)
 * @src_addr3: payload source address (3rd page, 32 LSB)
 * @src_addr4: payload source address (4th page, 32 LSB)
 * @src_addr5: payload source address (5th page, 32 LSB)
 * @crc: descriptor CRC
 */
struct xilinx_dpdma_hw_desc {
	u32 control;
	u32 desc_id;
	u32 xfer_size;
	u32 hsize_stride;
	u32 timestamp_lsb;
	u32 timestamp_msb;
	u32 addr_ext;
	u32 next_desc;
	u32 src_addr;
	u32 addr_ext_23;
	u32 addr_ext_45;
	u32 src_addr2;
	u32 src_addr3;
	u32 src_addr4;
	u32 src_addr5;
	u32 crc;
} __aligned(XILINX_DPDMA_ALIGN_BYTES);

/**
 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
 * @hw: DPDMA hardware descriptor
 * @node: list node for software descriptors
 * @dma_addr: DMA address of the software descriptor
 */
struct xilinx_dpdma_sw_desc {
	struct xilinx_dpdma_hw_desc hw;
	struct list_head node;
	dma_addr_t dma_addr;
};

/**
 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
 * @vdesc: virtual DMA descriptor
 * @chan: DMA channel
 * @descriptors: list of software descriptors
 * @error: an error has been detected with this descriptor
 */
struct xilinx_dpdma_tx_desc {
	struct virt_dma_desc vdesc;
	struct xilinx_dpdma_chan *chan;
	struct list_head descriptors;
	bool error;
};

#define to_dpdma_tx_desc(_desc) \
	container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc)

/**
 * struct xilinx_dpdma_chan - DPDMA channel
 * @vchan: virtual DMA channel
 * @reg: register base address
 * @id: channel ID
 * @wait_to_stop: queue to wait for outstanding transactions before stopping
 * @running: true if the channel is running
 * @first_frame: flag for the first frame of stream
 * @video_group: flag if multi-channel operation is needed for video channels
 * @lock: lock to access struct xilinx_dpdma_chan
 * @desc_pool: descriptor allocation pool
 * @err_task: error IRQ bottom half handler
 * @desc: references to descriptors being processed
 * @desc.pending: descriptor scheduled to the hardware, pending execution
 * @desc.active: descriptor being executed by the hardware
 * @xdev: DPDMA device
 */
struct xilinx_dpdma_chan {
	struct virt_dma_chan vchan;
	void __iomem *reg;
	unsigned int id;

	wait_queue_head_t wait_to_stop;
	bool running;
	bool first_frame;
	bool video_group;

	spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
	struct dma_pool *desc_pool;
	struct tasklet_struct err_task;

	struct {
		struct xilinx_dpdma_tx_desc *pending;
		struct xilinx_dpdma_tx_desc *active;
	} desc;

	struct xilinx_dpdma_device *xdev;
};

#define to_xilinx_chan(_chan) \
	container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)

/**
 * struct xilinx_dpdma_device - DPDMA device
 * @common: generic dma device structure
 * @reg: register base address
 * @dev: generic device structure
 * @irq: the interrupt number
 * @axi_clk: axi clock
 * @chan: DPDMA channels
 * @ext_addr: flag for 64 bit system (48 bit addressing)
 */
struct xilinx_dpdma_device {
	struct dma_device common;
	void __iomem *reg;
	struct device *dev;
	int irq;

	struct clk *axi_clk;
	struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];

	bool ext_addr;
};

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE	32
#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR	"65535"

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
enum xilinx_dpdma_testcases {
	DPDMA_TC_INTR_DONE,
	DPDMA_TC_NONE
};

struct xilinx_dpdma_debugfs {
	enum xilinx_dpdma_testcases testcase;
	u16 xilinx_dpdma_irq_done_count;
	unsigned int chan_id;
};

static struct xilinx_dpdma_debugfs dpdma_debugfs;

struct xilinx_dpdma_debugfs_request {
	const char *name;
	enum xilinx_dpdma_testcases tc;
	ssize_t (*read)(char *buf);
	int (*write)(char *args);
};

static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS) && chan->id == dpdma_debugfs.chan_id)
		dpdma_debugfs.xilinx_dpdma_irq_done_count++;
}

static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
{
	size_t out_str_len;

	dpdma_debugfs.testcase = DPDMA_TC_NONE;

	out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
	out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
			    out_str_len + 1);
	snprintf(buf, out_str_len, "%d",
		 dpdma_debugfs.xilinx_dpdma_irq_done_count);

	return 0;
}

static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
{
	char *arg;
	int ret;
	u32 id;

	arg = strsep(&args, " ");
	if (!arg || strncasecmp(arg, "start", 5))
		return -EINVAL;

	arg = strsep(&args, " ");
	if (!arg)
		return -EINVAL;

	ret = kstrtou32(arg, 0, &id);
	if (ret < 0)
		return ret;

	if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
		return -EINVAL;

	dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
	dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
	dpdma_debugfs.chan_id = id;

	return 0;
}

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
	{
		.name = "DESCRIPTOR_DONE_INTR",
		.tc = DPDMA_TC_INTR_DONE,
		.read = xilinx_dpdma_debugfs_desc_done_irq_read,
		.write = xilinx_dpdma_debugfs_desc_done_irq_write,
	},
};

static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	enum xilinx_dpdma_testcases testcase;
	char *kern_buff;
	int ret = 0;

	if (*pos != 0 || size <= 0)
		return -EINVAL;

	kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
	if (!kern_buff) {
		dpdma_debugfs.testcase = DPDMA_TC_NONE;
		return -ENOMEM;
	}

	testcase = READ_ONCE(dpdma_debugfs.testcase);
	if (testcase != DPDMA_TC_NONE) {
		ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
		if (ret < 0)
			goto done;
	} else {
		strscpy(kern_buff, "No testcase executed",
			XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
	}

	size = min(size, strlen(kern_buff));
	if (copy_to_user(buf, kern_buff, size))
		ret = -EFAULT;

done:
	kfree(kern_buff);
	if (ret)
		return ret;

	*pos = size + 1;
	return size;
}

static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
					  const char __user *buf, size_t size,
					  loff_t *pos)
{
	char *kern_buff, *kern_buff_start;
	char *testcase;
	unsigned int i;
	int ret;

	if (*pos != 0 || size <= 0)
		return -EINVAL;

	/* Supporting single instance of test as of now. */
	if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
		return -EBUSY;

	kern_buff = kzalloc(size, GFP_KERNEL);
	if (!kern_buff)
		return -ENOMEM;
	kern_buff_start = kern_buff;

	ret = strncpy_from_user(kern_buff, buf, size);
	if (ret < 0)
		goto done;

	/* Read the testcase name from a user request. */
	testcase = strsep(&kern_buff, " ");

	for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
		if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
			break;
	}

	if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
		ret = -EINVAL;
		goto done;
	}

	ret = dpdma_debugfs_reqs[i].write(kern_buff);
	if (ret < 0)
		goto done;

	ret = size;

done:
	kfree(kern_buff_start);
	return ret;
}

static const struct file_operations fops_xilinx_dpdma_dbgfs = {
	.owner = THIS_MODULE,
	.read = xilinx_dpdma_debugfs_read,
	.write = xilinx_dpdma_debugfs_write,
};

static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
{
	struct dentry *dent;

	dpdma_debugfs.testcase = DPDMA_TC_NONE;

	dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
				   NULL, &fops_xilinx_dpdma_dbgfs);
	if (IS_ERR(dent))
		dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
}
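
/*
 * Illustrative use of the testcase interface from user space (a sketch, not
 * part of the driver; the exact location of the file depends on where the
 * dmaengine core places dbg_dev_root for this device, typically under
 * /sys/kernel/debug/dmaengine/):
 *
 *	echo "DESCRIPTOR_DONE_INTR start 0" > testcase
 *	(let channel 0, ZYNQMP_DPDMA_VIDEO0, run for a while)
 *	cat testcase
 *
 * The read returns the number of DONE interrupts counted on the selected
 * channel since the write, and resets the testcase back to DPDMA_TC_NONE so
 * a new test can be started.
 */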

/* -----------------------------------------------------------------------------
 * I/O Accessors
 */

static inline u32 dpdma_read(void __iomem *base, u32 offset)
{
	return ioread32(base + offset);
}

static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}

static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
{
	dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
}

static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
{
	dpdma_write(base, offset, dpdma_read(base, offset) | set);
}

/* -----------------------------------------------------------------------------
 * Descriptor Operations
 */

/**
 * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor
 * @xdev: DPDMA device
 * @sw_desc: The software descriptor in which to set DMA addresses
 * @prev: The previous descriptor
 * @dma_addr: array of dma addresses
 * @num_src_addr: number of addresses in @dma_addr
 *
 * Set all the DMA addresses in the hardware descriptor corresponding to
 * @sw_desc from @dma_addr. If a previous descriptor is specified in @prev, its
 * next descriptor DMA address is set to the DMA address of @sw_desc. @prev may
 * be identical to @sw_desc for cyclic transfers.
 */
static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
					       struct xilinx_dpdma_sw_desc *sw_desc,
					       struct xilinx_dpdma_sw_desc *prev,
					       dma_addr_t dma_addr[],
					       unsigned int num_src_addr)
{
	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
	unsigned int i;

	hw_desc->src_addr = lower_32_bits(dma_addr[0]);
	if (xdev->ext_addr)
		hw_desc->addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
				   upper_32_bits(dma_addr[0]));

	for (i = 1; i < num_src_addr; i++) {
		u32 *addr = &hw_desc->src_addr2;

		addr[i - 1] = lower_32_bits(dma_addr[i]);

		if (xdev->ext_addr) {
			u32 *addr_ext = &hw_desc->addr_ext_23;
			u32 addr_msb;

			addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
			addr_msb <<= 16 * ((i - 1) % 2);
			addr_ext[(i - 1) / 2] |= addr_msb;
		}
	}

	if (!prev)
		return;

	prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
	if (xdev->ext_addr)
		prev->hw.addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
				   upper_32_bits(sw_desc->dma_addr));
}
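
/*
 * Worked example of the 48-bit address packing above (a sketch with made-up,
 * 256-byte aligned addresses, assuming xdev->ext_addr is set). The 1st page
 * goes to src_addr/addr_ext, the 2nd and 3rd pages (src_addr2/src_addr3)
 * share addr_ext_23, and the 4th and 5th (src_addr4/src_addr5) share
 * addr_ext_45:
 *
 *	dma_addr_t pages[2] = { 0x123456789a00ULL, 0xabcdef012400ULL };
 *
 *	xilinx_dpdma_sw_desc_set_dma_addrs(xdev, sw_desc, NULL, pages, 2);
 *	// sw_desc->hw.src_addr    == 0x56789a00   (low 32 bits of pages[0])
 *	// sw_desc->hw.addr_ext    |= 0x1234 << 16 (high 16 bits of pages[0])
 *	// sw_desc->hw.src_addr2   == 0xef012400   (low 32 bits of pages[1])
 *	// sw_desc->hw.addr_ext_23 |= 0xabcd       (high 16 bits of pages[1],
 *	//                                          low half of the field)
 */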

/**
 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
 * @chan: DPDMA channel
 *
 * Allocate a software descriptor from the channel's descriptor pool.
 *
 * Return: a software descriptor or NULL.
 */
static struct xilinx_dpdma_sw_desc *
xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	dma_addr_t dma_addr;

	sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
	if (!sw_desc)
		return NULL;

	sw_desc->dma_addr = dma_addr;

	return sw_desc;
}

/**
 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
 * @chan: DPDMA channel
 * @sw_desc: software descriptor to free
 *
 * Free a software descriptor from the channel's descriptor pool.
 */
static void
xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
			       struct xilinx_dpdma_sw_desc *sw_desc)
{
	dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
}

/**
 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
 * @chan: DPDMA channel
 * @tx_desc: tx descriptor to dump
 *
 * Dump contents of a tx descriptor.
 */
static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
					   struct xilinx_dpdma_tx_desc *tx_desc)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct device *dev = chan->xdev->dev;
	unsigned int i = 0;

	dev_dbg(dev, "------- TX descriptor dump start -------\n");
	dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);

	list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
		struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;

		dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
		dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
		dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
		dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
		dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
		dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
		dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
		dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
		dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
		dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
		dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
		dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
		dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
		dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
		dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
		dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
		dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
		dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
	}

	dev_dbg(dev, "------- TX descriptor dump end -------\n");
}

/**
 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
 * @chan: DPDMA channel
 *
 * Allocate a tx descriptor.
 *
 * Return: a tx descriptor or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *tx_desc;

	tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
	if (!tx_desc)
		return NULL;

	INIT_LIST_HEAD(&tx_desc->descriptors);
	tx_desc->chan = chan;
	tx_desc->error = false;

	return tx_desc;
}

/**
 * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor
 * @vdesc: virtual DMA descriptor
 *
 * Free the virtual DMA descriptor @vdesc including its software descriptors.
 */
static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
{
	struct xilinx_dpdma_sw_desc *sw_desc, *next;
	struct xilinx_dpdma_tx_desc *desc;

	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);

	list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
		list_del(&sw_desc->node);
		xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
	}

	kfree(desc);
}

/**
 * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
 * descriptor
 * @chan: DPDMA channel
 * @xt: dma interleaved template
 *
 * Prepare a tx descriptor including internal software/hardware descriptors
 * based on @xt.
 *
 * Return: A DPDMA TX descriptor on success, or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
				       struct dma_interleaved_template *xt)
{
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_hw_desc *hw_desc;
	size_t hsize = xt->sgl[0].size;
	size_t stride = hsize + xt->sgl[0].icg;

	if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
		dev_err(chan->xdev->dev,
			"chan%u: buffer should be aligned at %d B\n",
			chan->id, XILINX_DPDMA_ALIGN_BYTES);
		return NULL;
	}

	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	if (!tx_desc)
		return NULL;

	sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
	if (!sw_desc) {
		xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
		return NULL;
	}

	xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
					   &xt->src_start, 1);

	hw_desc = &sw_desc->hw;
	hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
	hw_desc->xfer_size = hsize * xt->numf;
	hw_desc->hsize_stride =
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
			   stride / 16);
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;

	list_add_tail(&sw_desc->node, &tx_desc->descriptors);

	return tx_desc;
}

/* -----------------------------------------------------------------------------
 * DPDMA Channel Operations
 */

/**
 * xilinx_dpdma_chan_enable - Enable the channel
 * @chan: DPDMA channel
 *
 * Enable the channel and its interrupts. Set the QoS values for video class.
 */
static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id)
	    | XILINX_DPDMA_INTR_GLOBAL_MASK;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
	reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)
	    | XILINX_DPDMA_INTR_GLOBAL_ERR;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);

	reg = XILINX_DPDMA_CH_CNTL_ENABLE
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS);
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
}

/**
 * xilinx_dpdma_chan_disable - Disable the channel
 * @chan: DPDMA channel
 *
 * Disable the channel and its interrupts.
 */
static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS, reg);
	reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIDS, reg);

	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
}

/**
 * xilinx_dpdma_chan_pause - Pause the channel
 * @chan: DPDMA channel
 *
 * Pause the channel.
 */
static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
{
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

/**
 * xilinx_dpdma_chan_unpause - Unpause the channel
 * @chan: DPDMA channel
 *
 * Unpause the channel.
 */
static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
{
	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	u32 channels = 0;
	unsigned int i;

	for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
		if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
			return 0;

		if (xdev->chan[i]->video_group)
			channels |= BIT(i);
	}

	return channels;
}

/**
 * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
 * @chan: DPDMA channel
 *
 * Queue the next descriptor, if any, to the hardware. If the channel is
 * stopped, start it first. Otherwise retrigger it with the next descriptor.
 */
static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_tx_desc *desc;
	struct virt_dma_desc *vdesc;
	u32 reg, channels;
	bool first_frame;

	lockdep_assert_held(&chan->lock);

	if (chan->desc.pending)
		return;

	if (!chan->running) {
		xilinx_dpdma_chan_unpause(chan);
		xilinx_dpdma_chan_enable(chan);
		chan->first_frame = true;
		chan->running = true;
	}

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);
	chan->desc.pending = desc;
	list_del(&desc->vdesc.node);

	/*
	 * Assign the cookie to descriptors in this transaction. Only 16 bits
	 * are used, but that should be enough.
	 */
	list_for_each_entry(sw_desc, &desc->descriptors, node)
		sw_desc->hw.desc_id = desc->vdesc.tx.cookie
				    & XILINX_DPDMA_CH_DESC_ID_MASK;

	sw_desc = list_first_entry(&desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
		    lower_32_bits(sw_desc->dma_addr));
	if (xdev->ext_addr)
		dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
			    FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
				       upper_32_bits(sw_desc->dma_addr)));

	first_frame = chan->first_frame;
	chan->first_frame = false;

	if (chan->video_group) {
		channels = xilinx_dpdma_chan_video_group_ready(chan);
		/*
		 * Trigger the transfer only when all channels in the group are
		 * ready.
		 */
		if (!channels)
			return;
	} else {
		channels = BIT(chan->id);
	}

	if (first_frame)
		reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
	else
		reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);

	dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}

/**
 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
 * @chan: DPDMA channel
 *
 * Read and return the number of outstanding transactions from register.
 *
 * Return: Number of outstanding transactions from the status register.
 */
static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
{
	return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
			 dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
}

/**
 * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
 * @chan: DPDMA channel
 *
 * Notify waiters for the 'no outstanding transaction' event, so waiters can
 * stop the channel safely. This function is supposed to be called when the
 * 'no outstanding' interrupt is generated. The 'no outstanding' interrupt is
 * disabled and should be re-enabled when this event is handled. If the channel
 * status register still shows some number of outstanding transactions, the
 * interrupt remains enabled.
 *
 * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
 * transaction(s).
 */
static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt;

	cnt = xilinx_dpdma_chan_ostand(chan);
	if (cnt) {
		dev_dbg(chan->xdev->dev,
			"chan%u: %d outstanding transactions\n",
			chan->id, cnt);
		return -EWOULDBLOCK;
	}

	/* Disable 'no outstanding' interrupt */
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
		    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
	wake_up(&chan->wait_to_stop);

	return 0;
}

/**
 * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding irq
 * @chan: DPDMA channel
 *
 * Wait for the no outstanding transaction interrupt. This function can sleep
 * for 50ms.
 *
 * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
 * from wait_event_interruptible_timeout().
 */
static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
{
	int ret;

	/* Wait for a no outstanding transaction interrupt for up to 50 msec */
	ret = wait_event_interruptible_timeout(chan->wait_to_stop,
					       !xilinx_dpdma_chan_ostand(chan),
					       msecs_to_jiffies(50));
	if (ret > 0) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
		chan->id, xilinx_dpdma_chan_ostand(chan));

	if (ret == 0)
		return -ETIMEDOUT;

	return ret;
}

/**
 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
 * @chan: DPDMA channel
 *
 * Poll the outstanding transaction status, and return when there's no
 * outstanding transaction. This function can be used in the interrupt context
 * or where atomicity is required. The calling thread may wait more than 50ms.
 *
 * Return: 0 on success, or -ETIMEDOUT.
 */
static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt, loop = 50000;

	/* Poll at least for 50ms (20 fps). */
	do {
		cnt = xilinx_dpdma_chan_ostand(chan);
		udelay(1);
	} while (loop-- > 0 && cnt);

	if (loop) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
		chan->id, xilinx_dpdma_chan_ostand(chan));

	return -ETIMEDOUT;
}

/**
 * xilinx_dpdma_chan_stop - Stop the channel
 * @chan: DPDMA channel
 *
 * Stop a previously paused channel by first waiting for completion of all
 * outstanding transactions and then disabling the channel.
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
{
	unsigned long flags;
	int ret;

	ret = xilinx_dpdma_chan_wait_no_ostand(chan);
	if (ret)
		return ret;

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_disable(chan);
	chan->running = false;
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/**
 * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion
 * @chan: DPDMA channel
 *
 * Handle completion of the currently active descriptor (@chan->desc.active).
 * As we currently support cyclic transfers only, this just invokes the cyclic
 * callback. The descriptor will be completed at the VSYNC interrupt when a new
 * descriptor replaces it.
 */
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *active;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dpdma_debugfs_desc_done_irq(chan);

	active = chan->desc.active;
	if (active)
		vchan_cyclic_callback(&active->vdesc);
	else
		dev_warn(chan->xdev->dev,
			 "chan%u: DONE IRQ with no active descriptor!\n",
			 chan->id);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling
 * @chan: DPDMA channel
 *
 * At VSYNC the active descriptor may have been replaced by the pending
 * descriptor. Detect this through the DESC_ID and perform appropriate
 * bookkeeping.
 */
static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *pending;
	struct xilinx_dpdma_sw_desc *sw_desc;
	unsigned long flags;
	u32 desc_id;

	spin_lock_irqsave(&chan->lock, flags);

	pending = chan->desc.pending;
	if (!chan->running || !pending)
		goto out;

	desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
		& XILINX_DPDMA_CH_DESC_ID_MASK;

	/* If the retrigger raced with vsync, retry at the next frame. */
	sw_desc = list_first_entry(&pending->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	if (sw_desc->hw.desc_id != desc_id) {
		dev_dbg(chan->xdev->dev,
			"chan%u: vsync race lost (%u != %u), retrying\n",
			chan->id, sw_desc->hw.desc_id, desc_id);
		goto out;
	}

	/*
	 * Complete the active descriptor, if any, promote the pending
	 * descriptor to active, and queue the next transfer, if any.
	 */
	if (chan->desc.active)
		vchan_cookie_complete(&chan->desc.active->vdesc);
	chan->desc.active = pending;
	chan->desc.pending = NULL;

	xilinx_dpdma_chan_queue_transfer(chan);

out:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_err - Detect any channel error
 * @chan: DPDMA channel
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: true if any channel error occurs, or false otherwise.
 */
static bool
xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
{
	if (!chan)
		return false;

	if (chan->running &&
	    ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
	     (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
		return true;

	return false;
}

/**
 * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
 * @chan: DPDMA channel
 *
 * This function is called when any channel error or any global error occurs.
 * It disables the channel that was paused by errors and determines whether
 * the current active descriptor can be rescheduled, depending on the
 * descriptor status.
 */
static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_tx_desc *active;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	dev_dbg(xdev->dev, "chan%u: cur desc addr = 0x%04x%08x\n",
		chan->id,
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
	dev_dbg(xdev->dev, "chan%u: cur payload addr = 0x%04x%08x\n",
		chan->id,
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));

	xilinx_dpdma_chan_disable(chan);
	chan->running = false;

	if (!chan->desc.active)
		goto out_unlock;

	active = chan->desc.active;
	chan->desc.active = NULL;

	xilinx_dpdma_chan_dump_tx_desc(chan, active);

	if (active->error)
		dev_dbg(xdev->dev, "chan%u: repeated error on desc\n",
			chan->id);

	/* Reschedule if there's no new descriptor */
	if (!chan->desc.pending &&
	    list_empty(&chan->vchan.desc_issued)) {
		active->error = true;
		list_add_tail(&active->vdesc.node,
			      &chan->vchan.desc_issued);
	} else {
		xilinx_dpdma_chan_free_tx_desc(&active->vdesc);
	}

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/* -----------------------------------------------------------------------------
 * DMA Engine Operations
 */

static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
				  struct dma_interleaved_template *xt,
				  unsigned long flags)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_tx_desc *desc;

	if (xt->dir != DMA_MEM_TO_DEV)
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (!(flags & DMA_PREP_REPEAT) || !(flags & DMA_PREP_LOAD_EOT))
		return NULL;

	desc = xilinx_dpdma_chan_prep_interleaved_dma(chan, xt);
	if (!desc)
		return NULL;

	vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK);

	return &desc->vdesc.tx;
}
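
/*
 * Sketch of how a client driver would use this operation to feed one
 * framebuffer plane as a repeated frame (illustrative only; chan comes from
 * dma_request_chan(), fb_dma_addr/width/height/bpp/stride are hypothetical
 * client-side values, and error handling is omitted):
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = fb_dma_addr;		// must be 256-byte aligned
 *	xt->numf = height;			// number of lines
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = width * bpp;		// bytes per line
 *	xt->sgl[0].icg = stride - width * bpp;	// inter-line gap
 *
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_REPEAT |
 *					    DMA_PREP_LOAD_EOT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */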

/**
 * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel
 * @dchan: DMA channel
 *
 * Allocate a descriptor pool for the channel.
 *
 * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
 */
static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	size_t align = __alignof__(struct xilinx_dpdma_sw_desc);

	chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
					  chan->xdev->dev,
					  sizeof(struct xilinx_dpdma_sw_desc),
					  align, 0);
	if (!chan->desc_pool) {
		dev_err(chan->xdev->dev,
			"chan%u: failed to allocate a descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	return 0;
}

/**
 * xilinx_dpdma_free_chan_resources - Free all resources for the channel
 * @dchan: DMA channel
 *
 * Free resources associated with the virtual DMA channel, and destroy the
 * descriptor pool.
 */
static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);

	vchan_free_chan_resources(&chan->vchan);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int xilinx_dpdma_config(struct dma_chan *dchan,
			       struct dma_slave_config *config)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_peripheral_config *pconfig;
	unsigned long flags;

	/*
	 * The destination address doesn't need to be specified as the DPDMA is
	 * hardwired to the destination (the DP controller). The transfer
	 * width, burst size and port window size are thus meaningless, they're
	 * fixed both on the DPDMA side and on the DP controller side.
	 */

	/*
	 * Use the peripheral_config to indicate that the channel is part
	 * of a video group. This requires matching use of the custom
	 * structure in each driver.
	 */
	pconfig = config->peripheral_config;
	if (WARN_ON(pconfig && config->peripheral_size != sizeof(*pconfig)))
		return -EINVAL;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->id <= ZYNQMP_DPDMA_VIDEO2 && pconfig)
		chan->video_group = pconfig->video_group;
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
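
/*
 * Sketch of the client side of the peripheral_config mechanism above
 * (illustrative; the client includes <linux/dma/xilinx_dpdma.h> to get
 * struct xilinx_dpdma_peripheral_config):
 *
 *	struct xilinx_dpdma_peripheral_config pconfig = {
 *		.video_group = true,
 *	};
 *	struct dma_slave_config config = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.peripheral_config = &pconfig,
 *		.peripheral_size = sizeof(pconfig),
 *	};
 *
 *	dmaengine_slave_config(chan, &config);
 */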

static int xilinx_dpdma_pause(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));

	return 0;
}

static int xilinx_dpdma_resume(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));

	return 0;
}

/**
 * xilinx_dpdma_terminate_all - Terminate the channel and descriptors
 * @dchan: DMA channel
 *
 * Pause the channel without waiting for ongoing transfers to complete. Waiting
 * for completion is performed by xilinx_dpdma_synchronize() that will disable
 * the channel to complete the stop.
 *
 * All the descriptors associated with the channel that are guaranteed not to
 * be touched by the hardware are freed. The pending and active descriptors are
 * not touched, and will be freed either upon completion, or by
 * xilinx_dpdma_synchronize().
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_device *xdev = chan->xdev;
	LIST_HEAD(descriptors);
	unsigned long flags;
	unsigned int i;

	/* Pause the channel (including the whole video group if applicable). */
	if (chan->video_group) {
		for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
			if (xdev->chan[i]->video_group &&
			    xdev->chan[i]->running) {
				xilinx_dpdma_chan_pause(xdev->chan[i]);
				xdev->chan[i]->video_group = false;
			}
		}
	} else {
		xilinx_dpdma_chan_pause(chan);
	}

	/* Gather all the descriptors we can free and free them. */
	spin_lock_irqsave(&chan->vchan.lock, flags);
	vchan_get_all_descriptors(&chan->vchan, &descriptors);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &descriptors);

	return 0;
}

/**
 * xilinx_dpdma_synchronize - Synchronize callback execution
 * @dchan: DMA channel
 *
 * Synchronizing callback execution ensures that all previously issued
 * transfers have completed and all associated callbacks have been called and
 * have returned.
 *
 * This function waits for the DMA channel to stop. It assumes it has been
 * paused by a previous call to dmaengine_terminate_async(), and that no new
 * pending descriptors have been issued with dma_async_issue_pending(). The
 * behaviour is undefined otherwise.
 */
static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	xilinx_dpdma_chan_stop(chan);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (chan->desc.pending) {
		vchan_terminate_vdesc(&chan->desc.pending->vdesc);
		chan->desc.pending = NULL;
	}
	if (chan->desc.active) {
		vchan_terminate_vdesc(&chan->desc.active->vdesc);
		chan->desc.active = NULL;
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_synchronize(&chan->vchan);
}
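
/*
 * From a client's point of view the expected teardown sequence is therefore
 * (a sketch; dmaengine_terminate_sync() performs both steps in one call):
 *
 *	dmaengine_terminate_async(chan);	// pause, free issued descriptors
 *	dmaengine_synchronize(chan);		// stop channel, wait for callbacks
 */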

/* -----------------------------------------------------------------------------
 * Interrupt and Tasklet Handling
 */

/**
 * xilinx_dpdma_err - Detect any global error
 * @isr: Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: True if any global error occurs, or false otherwise.
 */
static bool xilinx_dpdma_err(u32 isr, u32 eisr)
{
	if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
	    eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
		return true;

	return false;
}

/**
 * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt
 * @xdev: DPDMA device
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Handle any error reported by @isr and @eisr. This function disables the
 * corresponding error interrupts, which should be re-enabled once handling
 * is done.
 */
static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
					u32 isr, u32 eisr)
{
	bool err = xilinx_dpdma_err(isr, eisr);
	unsigned int i;

	dev_dbg_ratelimited(xdev->dev,
			    "error irq: isr = 0x%08x, eisr = 0x%08x\n",
			    isr, eisr);

	/* Disable channel error interrupts until errors are handled. */
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
		    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
		    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
			tasklet_schedule(&xdev->chan[i]->err_task);
}

/**
 * xilinx_dpdma_enable_irq - Enable interrupts
 * @xdev: DPDMA device
 *
 * Enable interrupts.
 */
static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_disable_irq - Disable interrupts
 * @xdev: DPDMA device
 *
 * Disable interrupts.
 */
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
 * @t: pointer to the tasklet associated with this handler
 *
 * Per channel error handling tasklet. This function waits for the outstanding
 * transaction to complete and triggers error handling. After error handling,
 * it re-enables the channel error interrupts and restarts the channel if
 * needed.
 */
static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
{
	struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
	struct xilinx_dpdma_device *xdev = chan->xdev;
	unsigned long flags;

	/* Proceed with error handling even when polling fails. */
	xilinx_dpdma_chan_poll_no_ostand(chan);

	xilinx_dpdma_chan_handle_err(chan);

	dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
		    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
{
	struct xilinx_dpdma_device *xdev = data;
	unsigned long mask;
	unsigned int i;
	u32 status;
	u32 error;

	status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
	error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
	if (!status && !error)
		return IRQ_NONE;

	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);

	if (status & XILINX_DPDMA_INTR_VSYNC) {
		/*
		 * There's a single VSYNC interrupt that needs to be processed
		 * by each running channel to update the active descriptor.
		 */
		for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
			struct xilinx_dpdma_chan *chan = xdev->chan[i];

			if (chan)
				xilinx_dpdma_chan_vsync_irq(chan);
		}
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_done_irq(xdev->chan[i]);
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
	}

	mask = status & XILINX_DPDMA_INTR_ERR_ALL;
	if (mask || error)
		xilinx_dpdma_handle_err_irq(xdev, mask, error);

	return IRQ_HANDLED;
}
1549 | |
1550 | /* ----------------------------------------------------------------------------- |
1551 | * Initialization & Cleanup |
1552 | */ |
1553 | |
1554 | static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev, |
1555 | unsigned int chan_id) |
1556 | { |
1557 | struct xilinx_dpdma_chan *chan; |
1558 | |
1559 | chan = devm_kzalloc(dev: xdev->dev, size: sizeof(*chan), GFP_KERNEL); |
1560 | if (!chan) |
1561 | return -ENOMEM; |
1562 | |
1563 | chan->id = chan_id; |
1564 | chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE |
1565 | + XILINX_DPDMA_CH_OFFSET * chan->id; |
1566 | chan->running = false; |
1567 | chan->xdev = xdev; |
1568 | |
1569 | spin_lock_init(&chan->lock); |
1570 | init_waitqueue_head(&chan->wait_to_stop); |
1571 | |
	tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);
1573 | |
1574 | chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc; |
	vchan_init(&chan->vchan, &xdev->common);
1576 | |
1577 | xdev->chan[chan->id] = chan; |
1578 | |
1579 | return 0; |
1580 | } |
1581 | |
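/**
 * xilinx_dpdma_chan_remove - Clean up a DPDMA channel
 * @chan: DPDMA channel, may be NULL
 *
 * Kill the channel error tasklet and unlink the channel from the DMA device
 * channel list.
 */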
1582 | static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan) |
1583 | { |
1584 | if (!chan) |
1585 | return; |
1586 | |
	tasklet_kill(&chan->err_task);
	list_del(&chan->vchan.chan.device_node);
1589 | } |
1590 | |
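/**
 * of_dma_xilinx_xlate - Translate a DT DMA specifier to a DMA channel
 * @dma_spec: DT phandle arguments, with args[0] holding the channel ID
 * @ofdma: DT DMA controller data
 *
 * Return: the requested DMA channel, or NULL if the channel ID is out of
 * range or the channel has not been initialized.
 */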
1591 | static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, |
1592 | struct of_dma *ofdma) |
1593 | { |
1594 | struct xilinx_dpdma_device *xdev = ofdma->of_dma_data; |
1595 | u32 chan_id = dma_spec->args[0]; |
1596 | |
1597 | if (chan_id >= ARRAY_SIZE(xdev->chan)) |
1598 | return NULL; |
1599 | |
1600 | if (!xdev->chan[chan_id]) |
1601 | return NULL; |
1602 | |
	return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
1604 | } |
1605 | |
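/**
 * dpdma_hw_init - Bring the hardware to a quiescent state
 * @xdev: DPDMA device
 *
 * Mask all interrupts, disable every channel and clear any pending interrupt
 * and error status.
 */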
1606 | static void dpdma_hw_init(struct xilinx_dpdma_device *xdev) |
1607 | { |
1608 | unsigned int i; |
1609 | void __iomem *reg; |
1610 | |
1611 | /* Disable all interrupts */ |
1612 | xilinx_dpdma_disable_irq(xdev); |
1613 | |
1614 | /* Stop all channels */ |
1615 | for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) { |
1616 | reg = xdev->reg + XILINX_DPDMA_CH_BASE |
1617 | + XILINX_DPDMA_CH_OFFSET * i; |
		dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
1619 | } |
1620 | |
1621 | /* Clear the interrupt status registers */ |
	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
1624 | } |
1625 | |
1626 | static int xilinx_dpdma_probe(struct platform_device *pdev) |
1627 | { |
1628 | struct xilinx_dpdma_device *xdev; |
1629 | struct dma_device *ddev; |
1630 | unsigned int i; |
1631 | int ret; |
1632 | |
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
1634 | if (!xdev) |
1635 | return -ENOMEM; |
1636 | |
1637 | xdev->dev = &pdev->dev; |
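	/* Use extended addressing when dma_addr_t is wider than 32 bits. */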
1638 | xdev->ext_addr = sizeof(dma_addr_t) > 4; |
1639 | |
	INIT_LIST_HEAD(&xdev->common.channels);
1641 | |
	platform_set_drvdata(pdev, xdev);
1643 | |
	xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
	if (IS_ERR(xdev->axi_clk))
		return PTR_ERR(xdev->axi_clk);
1647 | |
	xdev->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg))
		return PTR_ERR(xdev->reg);
1651 | |
1652 | dpdma_hw_init(xdev); |
1653 | |
1654 | xdev->irq = platform_get_irq(pdev, 0); |
1655 | if (xdev->irq < 0) |
1656 | return xdev->irq; |
1657 | |
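	/*
	 * All interrupts were masked in dpdma_hw_init(), so requesting the
	 * IRQ this early is safe; interrupts are only enabled once probing
	 * completes.
	 */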
	ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
			  dev_name(xdev->dev), xdev);
1660 | if (ret) { |
1661 | dev_err(xdev->dev, "failed to request IRQ\n" ); |
1662 | return ret; |
1663 | } |
1664 | |
1665 | ddev = &xdev->common; |
1666 | ddev->dev = &pdev->dev; |
1667 | |
1668 | dma_cap_set(DMA_SLAVE, ddev->cap_mask); |
1669 | dma_cap_set(DMA_PRIVATE, ddev->cap_mask); |
1670 | dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask); |
1671 | dma_cap_set(DMA_REPEAT, ddev->cap_mask); |
1672 | dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask); |
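	/* copy_align is expressed as the log2 of the required alignment. */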
1673 | ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1); |
1674 | |
1675 | ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources; |
1676 | ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources; |
1677 | ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma; |
	/* TODO: Can we achieve better granularity? */
1679 | ddev->device_tx_status = dma_cookie_status; |
1680 | ddev->device_issue_pending = xilinx_dpdma_issue_pending; |
1681 | ddev->device_config = xilinx_dpdma_config; |
1682 | ddev->device_pause = xilinx_dpdma_pause; |
1683 | ddev->device_resume = xilinx_dpdma_resume; |
1684 | ddev->device_terminate_all = xilinx_dpdma_terminate_all; |
1685 | ddev->device_synchronize = xilinx_dpdma_synchronize; |
1686 | ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED); |
1687 | ddev->directions = BIT(DMA_MEM_TO_DEV); |
1688 | ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; |
1689 | |
1690 | for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) { |
		ret = xilinx_dpdma_chan_init(xdev, i);
1692 | if (ret < 0) { |
1693 | dev_err(xdev->dev, "failed to initialize channel %u\n" , |
1694 | i); |
1695 | goto error; |
1696 | } |
1697 | } |
1698 | |
	ret = clk_prepare_enable(xdev->axi_clk);
1700 | if (ret) { |
1701 | dev_err(xdev->dev, "failed to enable the axi clock\n" ); |
1702 | goto error; |
1703 | } |
1704 | |
	ret = dma_async_device_register(ddev);
1706 | if (ret) { |
1707 | dev_err(xdev->dev, "failed to register the dma device\n" ); |
1708 | goto error_dma_async; |
1709 | } |
1710 | |
	ret = of_dma_controller_register(xdev->dev->of_node,
					 of_dma_xilinx_xlate, ddev);
1713 | if (ret) { |
1714 | dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n" ); |
1715 | goto error_of_dma; |
1716 | } |
1717 | |
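	/* Everything is registered; unmask the DPDMA interrupts. */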
1718 | xilinx_dpdma_enable_irq(xdev); |
1719 | |
1720 | xilinx_dpdma_debugfs_init(xdev); |
1721 | |
	dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");
1723 | |
1724 | return 0; |
1725 | |
1726 | error_of_dma: |
	dma_async_device_unregister(ddev);
error_dma_async:
	clk_disable_unprepare(xdev->axi_clk);
1730 | error: |
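	/*
	 * xilinx_dpdma_chan_remove() accepts a NULL channel, so every slot
	 * can be walked even when only some channels were initialized.
	 */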
1731 | for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) |
		xilinx_dpdma_chan_remove(xdev->chan[i]);
1733 | |
1734 | free_irq(xdev->irq, xdev); |
1735 | |
1736 | return ret; |
1737 | } |
1738 | |
1739 | static void xilinx_dpdma_remove(struct platform_device *pdev) |
1740 | { |
1741 | struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev); |
1742 | unsigned int i; |
1743 | |
1744 | /* Start by disabling the IRQ to avoid races during cleanup. */ |
1745 | free_irq(xdev->irq, xdev); |
1746 | |
1747 | xilinx_dpdma_disable_irq(xdev); |
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&xdev->common);
	clk_disable_unprepare(xdev->axi_clk);
1751 | |
1752 | for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) |
		xilinx_dpdma_chan_remove(xdev->chan[i]);
1754 | } |
1755 | |
1756 | static const struct of_device_id xilinx_dpdma_of_match[] = { |
	{ .compatible = "xlnx,zynqmp-dpdma", },
1758 | { /* end of table */ }, |
1759 | }; |
1760 | MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match); |
1761 | |
1762 | static struct platform_driver xilinx_dpdma_driver = { |
1763 | .probe = xilinx_dpdma_probe, |
1764 | .remove_new = xilinx_dpdma_remove, |
1765 | .driver = { |
1766 | .name = "xilinx-zynqmp-dpdma" , |
1767 | .of_match_table = xilinx_dpdma_of_match, |
1768 | }, |
1769 | }; |
1770 | |
1771 | module_platform_driver(xilinx_dpdma_driver); |
1772 | |
1773 | MODULE_AUTHOR("Xilinx, Inc." ); |
1774 | MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver" ); |
1775 | MODULE_LICENSE("GPL v2" ); |
1776 | |