1 | /* |
2 | * Copyright (C) 2017 Spreadtrum Communications Inc. |
3 | * |
4 | * SPDX-License-Identifier: GPL-2.0 |
5 | */ |
6 | |
7 | #include <linux/clk.h> |
8 | #include <linux/dma-mapping.h> |
9 | #include <linux/dma/sprd-dma.h> |
10 | #include <linux/errno.h> |
11 | #include <linux/init.h> |
12 | #include <linux/interrupt.h> |
13 | #include <linux/io.h> |
14 | #include <linux/kernel.h> |
15 | #include <linux/module.h> |
16 | #include <linux/of.h> |
17 | #include <linux/of_dma.h> |
18 | #include <linux/platform_device.h> |
19 | #include <linux/pm_runtime.h> |
20 | #include <linux/slab.h> |
21 | |
22 | #include "virt-dma.h" |
23 | |
24 | #define SPRD_DMA_CHN_REG_OFFSET 0x1000 |
25 | #define SPRD_DMA_CHN_REG_LENGTH 0x40 |
26 | #define SPRD_DMA_MEMCPY_MIN_SIZE 64 |
27 | |
28 | /* DMA global registers definition */ |
29 | #define SPRD_DMA_GLB_PAUSE 0x0 |
30 | #define SPRD_DMA_GLB_FRAG_WAIT 0x4 |
31 | #define SPRD_DMA_GLB_REQ_PEND0_EN 0x8 |
32 | #define SPRD_DMA_GLB_REQ_PEND1_EN 0xc |
33 | #define SPRD_DMA_GLB_INT_RAW_STS 0x10 |
34 | #define SPRD_DMA_GLB_INT_MSK_STS 0x14 |
35 | #define SPRD_DMA_GLB_REQ_STS 0x18 |
36 | #define SPRD_DMA_GLB_CHN_EN_STS 0x1c |
37 | #define SPRD_DMA_GLB_DEBUG_STS 0x20 |
38 | #define SPRD_DMA_GLB_ARB_SEL_STS 0x24 |
39 | #define SPRD_DMA_GLB_2STAGE_GRP1 0x28 |
40 | #define SPRD_DMA_GLB_2STAGE_GRP2 0x2c |
41 | #define SPRD_DMA_GLB_REQ_UID(uid) (0x4 * ((uid) - 1)) |
42 | #define SPRD_DMA_GLB_REQ_UID_OFFSET 0x2000 |
43 | |
44 | /* DMA channel registers definition */ |
45 | #define SPRD_DMA_CHN_PAUSE 0x0 |
46 | #define SPRD_DMA_CHN_REQ 0x4 |
47 | #define SPRD_DMA_CHN_CFG 0x8 |
48 | #define SPRD_DMA_CHN_INTC 0xc |
49 | #define SPRD_DMA_CHN_SRC_ADDR 0x10 |
50 | #define SPRD_DMA_CHN_DES_ADDR 0x14 |
51 | #define SPRD_DMA_CHN_FRG_LEN 0x18 |
52 | #define SPRD_DMA_CHN_BLK_LEN 0x1c |
53 | #define SPRD_DMA_CHN_TRSC_LEN 0x20 |
54 | #define SPRD_DMA_CHN_TRSF_STEP 0x24 |
55 | #define SPRD_DMA_CHN_WARP_PTR 0x28 |
56 | #define SPRD_DMA_CHN_WARP_TO 0x2c |
57 | #define SPRD_DMA_CHN_LLIST_PTR 0x30 |
58 | #define SPRD_DMA_CHN_FRAG_STEP 0x34 |
59 | #define SPRD_DMA_CHN_SRC_BLK_STEP 0x38 |
60 | #define SPRD_DMA_CHN_DES_BLK_STEP 0x3c |
61 | |
62 | /* SPRD_DMA_GLB_2STAGE_GRP register definition */ |
63 | #define SPRD_DMA_GLB_2STAGE_EN BIT(24) |
64 | #define SPRD_DMA_GLB_CHN_INT_MASK GENMASK(23, 20) |
65 | #define SPRD_DMA_GLB_DEST_INT BIT(22) |
66 | #define SPRD_DMA_GLB_SRC_INT BIT(20) |
67 | #define SPRD_DMA_GLB_LIST_DONE_TRG BIT(19) |
68 | #define SPRD_DMA_GLB_TRANS_DONE_TRG BIT(18) |
69 | #define SPRD_DMA_GLB_BLOCK_DONE_TRG BIT(17) |
70 | #define SPRD_DMA_GLB_FRAG_DONE_TRG BIT(16) |
71 | #define SPRD_DMA_GLB_TRG_OFFSET 16 |
72 | #define SPRD_DMA_GLB_DEST_CHN_MASK GENMASK(13, 8) |
73 | #define SPRD_DMA_GLB_DEST_CHN_OFFSET 8 |
74 | #define SPRD_DMA_GLB_SRC_CHN_MASK GENMASK(5, 0) |
75 | |
76 | /* SPRD_DMA_CHN_INTC register definition */ |
77 | #define SPRD_DMA_INT_MASK GENMASK(4, 0) |
78 | #define SPRD_DMA_INT_CLR_OFFSET 24 |
79 | #define SPRD_DMA_FRAG_INT_EN BIT(0) |
80 | #define SPRD_DMA_BLK_INT_EN BIT(1) |
81 | #define SPRD_DMA_TRANS_INT_EN BIT(2) |
82 | #define SPRD_DMA_LIST_INT_EN BIT(3) |
83 | #define SPRD_DMA_CFG_ERR_INT_EN BIT(4) |
84 | |
85 | /* SPRD_DMA_CHN_CFG register definition */ |
86 | #define SPRD_DMA_CHN_EN BIT(0) |
87 | #define SPRD_DMA_LINKLIST_EN BIT(4) |
88 | #define SPRD_DMA_WAIT_BDONE_OFFSET 24 |
89 | #define SPRD_DMA_DONOT_WAIT_BDONE 1 |
90 | |
91 | /* SPRD_DMA_CHN_REQ register definition */ |
92 | #define SPRD_DMA_REQ_EN BIT(0) |
93 | |
94 | /* SPRD_DMA_CHN_PAUSE register definition */ |
95 | #define SPRD_DMA_PAUSE_EN BIT(0) |
96 | #define SPRD_DMA_PAUSE_STS BIT(2) |
97 | #define SPRD_DMA_PAUSE_CNT 0x2000 |
98 | |
99 | /* DMA_CHN_WARP_* register definition */ |
100 | #define SPRD_DMA_HIGH_ADDR_MASK GENMASK(31, 28) |
101 | #define SPRD_DMA_LOW_ADDR_MASK GENMASK(31, 0) |
102 | #define SPRD_DMA_WRAP_ADDR_MASK GENMASK(27, 0) |
103 | #define SPRD_DMA_HIGH_ADDR_OFFSET 4 |
104 | |
105 | /* SPRD_DMA_CHN_INTC register definition */ |
106 | #define SPRD_DMA_FRAG_INT_STS BIT(16) |
107 | #define SPRD_DMA_BLK_INT_STS BIT(17) |
108 | #define SPRD_DMA_TRSC_INT_STS BIT(18) |
109 | #define SPRD_DMA_LIST_INT_STS BIT(19) |
110 | #define SPRD_DMA_CFGERR_INT_STS BIT(20) |
111 | #define SPRD_DMA_CHN_INT_STS \ |
112 | (SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS | \ |
113 | SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS | \ |
114 | SPRD_DMA_CFGERR_INT_STS) |
115 | |
116 | /* SPRD_DMA_CHN_FRG_LEN register definition */ |
117 | #define SPRD_DMA_SRC_DATAWIDTH_OFFSET 30 |
118 | #define SPRD_DMA_DES_DATAWIDTH_OFFSET 28 |
119 | #define SPRD_DMA_SWT_MODE_OFFSET 26 |
120 | #define SPRD_DMA_REQ_MODE_OFFSET 24 |
121 | #define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0) |
122 | #define SPRD_DMA_WRAP_SEL_DEST BIT(23) |
123 | #define SPRD_DMA_WRAP_EN BIT(22) |
124 | #define SPRD_DMA_FIX_SEL_OFFSET 21 |
125 | #define SPRD_DMA_FIX_EN_OFFSET 20 |
126 | #define SPRD_DMA_LLIST_END BIT(19) |
127 | #define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0) |
128 | |
129 | /* SPRD_DMA_CHN_BLK_LEN register definition */ |
130 | #define SPRD_DMA_BLK_LEN_MASK GENMASK(16, 0) |
131 | |
132 | /* SPRD_DMA_CHN_TRSC_LEN register definition */ |
133 | #define SPRD_DMA_TRSC_LEN_MASK GENMASK(27, 0) |
134 | |
135 | /* SPRD_DMA_CHN_TRSF_STEP register definition */ |
136 | #define SPRD_DMA_DEST_TRSF_STEP_OFFSET 16 |
137 | #define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0 |
138 | #define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0) |
139 | |
140 | /* SPRD DMA_SRC_BLK_STEP register definition */ |
141 | #define SPRD_DMA_LLIST_HIGH_MASK GENMASK(31, 28) |
142 | #define SPRD_DMA_LLIST_HIGH_SHIFT 28 |
143 | |
144 | /* define DMA channel mode & trigger mode mask */ |
145 | #define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0) |
146 | #define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0) |
147 | #define SPRD_DMA_INT_TYPE_MASK GENMASK(7, 0) |
148 | |
149 | /* define the DMA transfer step type */ |
150 | #define SPRD_DMA_NONE_STEP 0 |
151 | #define SPRD_DMA_BYTE_STEP 1 |
152 | #define SPRD_DMA_SHORT_STEP 2 |
153 | #define SPRD_DMA_WORD_STEP 4 |
154 | #define SPRD_DMA_DWORD_STEP 8 |
155 | |
156 | #define SPRD_DMA_SOFTWARE_UID 0 |
157 | |
158 | /* dma data width values */ |
159 | enum sprd_dma_datawidth { |
160 | SPRD_DMA_DATAWIDTH_1_BYTE, |
161 | SPRD_DMA_DATAWIDTH_2_BYTES, |
162 | SPRD_DMA_DATAWIDTH_4_BYTES, |
163 | SPRD_DMA_DATAWIDTH_8_BYTES, |
164 | }; |
165 | |
166 | /* dma channel hardware configuration */ |
167 | struct sprd_dma_chn_hw { |
168 | u32 pause; |
169 | u32 req; |
170 | u32 cfg; |
171 | u32 intc; |
172 | u32 src_addr; |
173 | u32 des_addr; |
174 | u32 frg_len; |
175 | u32 blk_len; |
176 | u32 trsc_len; |
177 | u32 trsf_step; |
178 | u32 wrap_ptr; |
179 | u32 wrap_to; |
180 | u32 llist_ptr; |
181 | u32 frg_step; |
182 | u32 src_blk_step; |
183 | u32 des_blk_step; |
184 | }; |
185 | |
186 | /* dma request description */ |
187 | struct sprd_dma_desc { |
188 | struct virt_dma_desc vd; |
189 | struct sprd_dma_chn_hw chn_hw; |
190 | enum dma_transfer_direction dir; |
191 | }; |
192 | |
193 | /* dma channel description */ |
194 | struct sprd_dma_chn { |
195 | struct virt_dma_chan vc; |
196 | void __iomem *chn_base; |
197 | struct sprd_dma_linklist linklist; |
198 | struct dma_slave_config slave_cfg; |
199 | u32 chn_num; |
200 | u32 dev_id; |
201 | enum sprd_dma_chn_mode chn_mode; |
202 | enum sprd_dma_trg_mode trg_mode; |
203 | enum sprd_dma_int_type int_type; |
204 | struct sprd_dma_desc *cur_desc; |
205 | }; |
206 | |
207 | /* SPRD dma device */ |
208 | struct sprd_dma_dev { |
209 | struct dma_device dma_dev; |
210 | void __iomem *glb_base; |
211 | struct clk *clk; |
212 | struct clk *ashb_clk; |
213 | int irq; |
214 | u32 total_chns; |
215 | struct sprd_dma_chn channels[] __counted_by(total_chns); |
216 | }; |
217 | |
218 | static void sprd_dma_free_desc(struct virt_dma_desc *vd); |
219 | static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param); |
220 | static struct of_dma_filter_info sprd_dma_info = { |
221 | .filter_fn = sprd_dma_filter_fn, |
222 | }; |
223 | |
224 | static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c) |
225 | { |
226 | return container_of(c, struct sprd_dma_chn, vc.chan); |
227 | } |
228 | |
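/*
 * The channels[] array is embedded at the end of struct sprd_dma_dev, so the
 * device can be recovered from a channel pointer with container_of() on the
 * channel's own slot.
 */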
229 | static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c) |
230 | { |
231 | struct sprd_dma_chn *schan = to_sprd_dma_chan(c); |
232 | |
233 | return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]); |
234 | } |
235 | |
236 | static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd) |
237 | { |
238 | return container_of(vd, struct sprd_dma_desc, vd); |
239 | } |
240 | |
241 | static void sprd_dma_glb_update(struct sprd_dma_dev *sdev, u32 reg, |
242 | u32 mask, u32 val) |
243 | { |
	u32 orig = readl(sdev->glb_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, sdev->glb_base + reg);
249 | } |
250 | |
251 | static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg, |
252 | u32 mask, u32 val) |
253 | { |
	u32 orig = readl(schan->chn_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, schan->chn_base + reg);
259 | } |
260 | |
261 | static int sprd_dma_enable(struct sprd_dma_dev *sdev) |
262 | { |
263 | int ret; |
264 | |
	ret = clk_prepare_enable(sdev->clk);
	if (ret)
		return ret;

	/*
	 * The ashb_clk is optional and only used by the AGCP DMA controller,
	 * so check whether it is present before enabling it.
	 */
	if (!IS_ERR(sdev->ashb_clk))
		ret = clk_prepare_enable(sdev->ashb_clk);
275 | |
276 | return ret; |
277 | } |
278 | |
279 | static void sprd_dma_disable(struct sprd_dma_dev *sdev) |
280 | { |
	clk_disable_unprepare(sdev->clk);

	/* Also disable the optional ashb_clk used by the AGCP DMA controller. */
	if (!IS_ERR(sdev->ashb_clk))
		clk_disable_unprepare(sdev->ashb_clk);
288 | } |
289 | |
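/*
 * Route the channel's hardware request line (dev_id) to this channel. The
 * UID map uses 1-based channel numbers; writing 0 (as sprd_dma_unset_uid()
 * does) leaves the request line unrouted.
 */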
290 | static void sprd_dma_set_uid(struct sprd_dma_chn *schan) |
291 | { |
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
300 | } |
301 | } |
302 | |
303 | static void sprd_dma_unset_uid(struct sprd_dma_chn *schan) |
304 | { |
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(0, sdev->glb_base + uid_offset);
313 | } |
314 | } |
315 | |
316 | static void sprd_dma_clear_int(struct sprd_dma_chn *schan) |
317 | { |
318 | sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC, |
319 | SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET, |
320 | SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET); |
321 | } |
322 | |
323 | static void sprd_dma_enable_chn(struct sprd_dma_chn *schan) |
324 | { |
325 | sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, |
326 | SPRD_DMA_CHN_EN); |
327 | } |
328 | |
329 | static void sprd_dma_disable_chn(struct sprd_dma_chn *schan) |
330 | { |
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
332 | } |
333 | |
334 | static void sprd_dma_soft_request(struct sprd_dma_chn *schan) |
335 | { |
336 | sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN, |
337 | SPRD_DMA_REQ_EN); |
338 | } |
339 | |
340 | static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable) |
341 | { |
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
343 | u32 pause, timeout = SPRD_DMA_PAUSE_CNT; |
344 | |
345 | if (enable) { |
346 | sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE, |
347 | SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN); |
348 | |
349 | do { |
			pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
			if (pause & SPRD_DMA_PAUSE_STS)
				break;

			cpu_relax();
		} while (--timeout > 0);

		if (!timeout)
			dev_warn(sdev->dma_dev.dev,
				 "pause dma controller timeout\n");
	} else {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, 0);
363 | } |
364 | } |
365 | |
366 | static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan) |
367 | { |
	u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

	if (!(cfg & SPRD_DMA_CHN_EN))
		return;

	sprd_dma_pause_resume(schan, true);
374 | sprd_dma_disable_chn(schan); |
375 | } |
376 | |
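/*
 * The channel address registers hold the low 32 bits of an address; the high
 * 4 bits (bits 35:32) live in bits 31:28 of the WARP pointer registers.
 */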
377 | static unsigned long sprd_dma_get_src_addr(struct sprd_dma_chn *schan) |
378 | { |
379 | unsigned long addr, addr_high; |
380 | |
	addr = readl(schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_PTR) &
		    SPRD_DMA_HIGH_ADDR_MASK;
384 | |
385 | return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET); |
386 | } |
387 | |
388 | static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan) |
389 | { |
390 | unsigned long addr, addr_high; |
391 | |
	addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
		    SPRD_DMA_HIGH_ADDR_MASK;
395 | |
396 | return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET); |
397 | } |
398 | |
399 | static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan) |
400 | { |
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
		       SPRD_DMA_CHN_INT_STS;
404 | |
405 | switch (intc_sts) { |
406 | case SPRD_DMA_CFGERR_INT_STS: |
407 | return SPRD_DMA_CFGERR_INT; |
408 | |
409 | case SPRD_DMA_LIST_INT_STS: |
410 | return SPRD_DMA_LIST_INT; |
411 | |
412 | case SPRD_DMA_TRSC_INT_STS: |
413 | return SPRD_DMA_TRANS_INT; |
414 | |
415 | case SPRD_DMA_BLK_INT_STS: |
416 | return SPRD_DMA_BLK_INT; |
417 | |
418 | case SPRD_DMA_FRAG_INT_STS: |
419 | return SPRD_DMA_FRAG_INT; |
420 | |
421 | default: |
		dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
423 | return SPRD_DMA_NO_INT; |
424 | } |
425 | } |
426 | |
427 | static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan) |
428 | { |
	u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
430 | |
431 | return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK; |
432 | } |
433 | |
434 | static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan) |
435 | { |
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
437 | u32 val, chn = schan->chn_num + 1; |
438 | |
439 | switch (schan->chn_mode) { |
440 | case SPRD_DMA_SRC_CHN0: |
441 | val = chn & SPRD_DMA_GLB_SRC_CHN_MASK; |
442 | val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET; |
443 | val |= SPRD_DMA_GLB_2STAGE_EN; |
444 | if (schan->int_type != SPRD_DMA_NO_INT) |
445 | val |= SPRD_DMA_GLB_SRC_INT; |
446 | |
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
448 | break; |
449 | |
450 | case SPRD_DMA_SRC_CHN1: |
451 | val = chn & SPRD_DMA_GLB_SRC_CHN_MASK; |
452 | val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET; |
453 | val |= SPRD_DMA_GLB_2STAGE_EN; |
454 | if (schan->int_type != SPRD_DMA_NO_INT) |
455 | val |= SPRD_DMA_GLB_SRC_INT; |
456 | |
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
458 | break; |
459 | |
460 | case SPRD_DMA_DST_CHN0: |
461 | val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) & |
462 | SPRD_DMA_GLB_DEST_CHN_MASK; |
463 | val |= SPRD_DMA_GLB_2STAGE_EN; |
464 | if (schan->int_type != SPRD_DMA_NO_INT) |
465 | val |= SPRD_DMA_GLB_DEST_INT; |
466 | |
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
468 | break; |
469 | |
470 | case SPRD_DMA_DST_CHN1: |
471 | val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) & |
472 | SPRD_DMA_GLB_DEST_CHN_MASK; |
473 | val |= SPRD_DMA_GLB_2STAGE_EN; |
474 | if (schan->int_type != SPRD_DMA_NO_INT) |
475 | val |= SPRD_DMA_GLB_DEST_INT; |
476 | |
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
478 | break; |
479 | |
480 | default: |
		dev_err(sdev->dma_dev.dev, "invalid channel mode setting %d\n",
			schan->chn_mode);
483 | return -EINVAL; |
484 | } |
485 | |
486 | return 0; |
487 | } |
488 | |
489 | static void sprd_dma_set_pending(struct sprd_dma_chn *schan, bool enable) |
490 | { |
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
492 | u32 reg, val, req_id; |
493 | |
494 | if (schan->dev_id == SPRD_DMA_SOFTWARE_UID) |
495 | return; |
496 | |
497 | /* The DMA request id always starts from 0. */ |
498 | req_id = schan->dev_id - 1; |
499 | |
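	/* Request ids 0-31 live in REQ_PEND0_EN, 32 and above in REQ_PEND1_EN. */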
500 | if (req_id < 32) { |
501 | reg = SPRD_DMA_GLB_REQ_PEND0_EN; |
502 | val = BIT(req_id); |
503 | } else { |
504 | reg = SPRD_DMA_GLB_REQ_PEND1_EN; |
505 | val = BIT(req_id - 32); |
506 | } |
507 | |
	sprd_dma_glb_update(sdev, reg, val, enable ? val : 0);
509 | } |
510 | |
511 | static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan, |
512 | struct sprd_dma_desc *sdesc) |
513 | { |
514 | struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw; |
515 | |
	writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
	writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
	writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
	writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
	writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
	writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
	writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
	writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
	writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
	writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
	writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
	writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
	writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
	writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
532 | } |
533 | |
534 | static void sprd_dma_start(struct sprd_dma_chn *schan) |
535 | { |
	struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

	if (!vd)
		return;

	list_del(&vd->node);
542 | schan->cur_desc = to_sprd_dma_desc(vd); |
543 | |
544 | /* |
545 | * Set 2-stage configuration if the channel starts one 2-stage |
546 | * transfer. |
547 | */ |
548 | if (schan->chn_mode && sprd_dma_set_2stage_config(schan)) |
549 | return; |
550 | |
551 | /* |
552 | * Copy the DMA configuration from DMA descriptor to this hardware |
553 | * channel. |
554 | */ |
	sprd_dma_set_chn_config(schan, schan->cur_desc);
	sprd_dma_set_uid(schan);
	sprd_dma_set_pending(schan, true);
558 | sprd_dma_enable_chn(schan); |
559 | |
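	/*
	 * Channels without a hardware request line are kicked off by a
	 * software request, except when they act as the destination of a
	 * 2-stage transfer.
	 */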
560 | if (schan->dev_id == SPRD_DMA_SOFTWARE_UID && |
561 | schan->chn_mode != SPRD_DMA_DST_CHN0 && |
562 | schan->chn_mode != SPRD_DMA_DST_CHN1) |
563 | sprd_dma_soft_request(schan); |
564 | } |
565 | |
566 | static void sprd_dma_stop(struct sprd_dma_chn *schan) |
567 | { |
568 | sprd_dma_stop_and_disable(schan); |
	sprd_dma_set_pending(schan, false);
570 | sprd_dma_unset_uid(schan); |
571 | sprd_dma_clear_int(schan); |
572 | schan->cur_desc = NULL; |
573 | } |
574 | |
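/*
 * A descriptor is complete once the reported interrupt type is at least one
 * level above the configured request mode, e.g. a transaction interrupt
 * completes a fragment or block request.
 */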
575 | static bool sprd_dma_check_trans_done(enum sprd_dma_int_type int_type, |
576 | enum sprd_dma_req_mode req_mode) |
577 | { |
578 | if (int_type == SPRD_DMA_NO_INT) |
579 | return false; |
580 | |
581 | if (int_type >= req_mode + 1) |
582 | return true; |
583 | else |
584 | return false; |
585 | } |
586 | |
587 | static irqreturn_t dma_irq_handle(int irq, void *dev_id) |
588 | { |
589 | struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id; |
	u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
591 | struct sprd_dma_chn *schan; |
592 | struct sprd_dma_desc *sdesc; |
593 | enum sprd_dma_req_mode req_type; |
594 | enum sprd_dma_int_type int_type; |
595 | bool trans_done = false, cyclic = false; |
596 | u32 i; |
597 | |
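	/*
	 * Each set bit in the masked interrupt status identifies a channel
	 * with a pending interrupt; handle them lowest bit first.
	 */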
598 | while (irq_status) { |
599 | i = __ffs(irq_status); |
600 | irq_status &= (irq_status - 1); |
601 | schan = &sdev->channels[i]; |
602 | |
		spin_lock(&schan->vc.lock);

		sdesc = schan->cur_desc;
		if (!sdesc) {
			spin_unlock(&schan->vc.lock);
608 | return IRQ_HANDLED; |
609 | } |
610 | |
611 | int_type = sprd_dma_get_int_type(schan); |
612 | req_type = sprd_dma_get_req_type(schan); |
613 | sprd_dma_clear_int(schan); |
614 | |
615 | /* cyclic mode schedule callback */ |
		cyclic = schan->linklist.phy_addr ? true : false;
		if (cyclic) {
			vchan_cyclic_callback(&sdesc->vd);
		} else {
			/* Check if the dma request descriptor is done. */
			trans_done = sprd_dma_check_trans_done(int_type, req_type);
			if (trans_done) {
				vchan_cookie_complete(&sdesc->vd);
				schan->cur_desc = NULL;
				sprd_dma_start(schan);
			}
		}
		spin_unlock(&schan->vc.lock);
629 | } |
630 | |
631 | return IRQ_HANDLED; |
632 | } |
633 | |
634 | static int sprd_dma_alloc_chan_resources(struct dma_chan *chan) |
635 | { |
	return pm_runtime_get_sync(chan->device->dev);
637 | } |
638 | |
639 | static void sprd_dma_free_chan_resources(struct dma_chan *chan) |
640 | { |
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
642 | struct virt_dma_desc *cur_vd = NULL; |
643 | unsigned long flags; |
644 | |
645 | spin_lock_irqsave(&schan->vc.lock, flags); |
646 | if (schan->cur_desc) |
647 | cur_vd = &schan->cur_desc->vd; |
648 | |
649 | sprd_dma_stop(schan); |
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	if (cur_vd)
		sprd_dma_free_desc(cur_vd);

	vchan_free_chan_resources(&schan->vc);
	pm_runtime_put(chan->device->dev);
657 | } |
658 | |
659 | static enum dma_status sprd_dma_tx_status(struct dma_chan *chan, |
660 | dma_cookie_t cookie, |
661 | struct dma_tx_state *txstate) |
662 | { |
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
664 | struct virt_dma_desc *vd; |
665 | unsigned long flags; |
666 | enum dma_status ret; |
667 | u32 pos; |
668 | |
	ret = dma_cookie_status(chan, cookie, txstate);
670 | if (ret == DMA_COMPLETE || !txstate) |
671 | return ret; |
672 | |
673 | spin_lock_irqsave(&schan->vc.lock, flags); |
674 | vd = vchan_find_desc(&schan->vc, cookie); |
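	/*
	 * For a descriptor still sitting in the queue, report the programmed
	 * transfer length as the residue; for the in-flight descriptor,
	 * report the channel's current source or destination address.
	 */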
675 | if (vd) { |
676 | struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd); |
677 | struct sprd_dma_chn_hw *hw = &sdesc->chn_hw; |
678 | |
679 | if (hw->trsc_len > 0) |
680 | pos = hw->trsc_len; |
681 | else if (hw->blk_len > 0) |
682 | pos = hw->blk_len; |
683 | else if (hw->frg_len > 0) |
684 | pos = hw->frg_len; |
685 | else |
686 | pos = 0; |
687 | } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) { |
688 | struct sprd_dma_desc *sdesc = schan->cur_desc; |
689 | |
690 | if (sdesc->dir == DMA_DEV_TO_MEM) |
691 | pos = sprd_dma_get_dst_addr(schan); |
692 | else |
693 | pos = sprd_dma_get_src_addr(schan); |
694 | } else { |
695 | pos = 0; |
696 | } |
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	dma_set_residue(txstate, pos);
700 | return ret; |
701 | } |
702 | |
703 | static void sprd_dma_issue_pending(struct dma_chan *chan) |
704 | { |
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
706 | unsigned long flags; |
707 | |
708 | spin_lock_irqsave(&schan->vc.lock, flags); |
	if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
		sprd_dma_start(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
712 | } |
713 | |
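/*
 * Map a dmaengine bus width (1/2/4/8 bytes) onto the 0-3 encoding used by
 * enum sprd_dma_datawidth.
 */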
714 | static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth) |
715 | { |
716 | switch (buswidth) { |
717 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
718 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
719 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
720 | case DMA_SLAVE_BUSWIDTH_8_BYTES: |
721 | return ffs(buswidth) - 1; |
722 | |
723 | default: |
724 | return -EINVAL; |
725 | } |
726 | } |
727 | |
728 | static int sprd_dma_get_step(enum dma_slave_buswidth buswidth) |
729 | { |
730 | switch (buswidth) { |
731 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
732 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
733 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
734 | case DMA_SLAVE_BUSWIDTH_8_BYTES: |
735 | return buswidth; |
736 | |
737 | default: |
738 | return -EINVAL; |
739 | } |
740 | } |
741 | |
742 | static int sprd_dma_fill_desc(struct dma_chan *chan, |
743 | struct sprd_dma_chn_hw *hw, |
744 | unsigned int sglen, int sg_index, |
745 | dma_addr_t src, dma_addr_t dst, u32 len, |
746 | enum dma_transfer_direction dir, |
747 | unsigned long flags, |
748 | struct dma_slave_config *slave_cfg) |
749 | { |
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
752 | enum sprd_dma_chn_mode chn_mode = schan->chn_mode; |
753 | u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK; |
754 | u32 int_mode = flags & SPRD_DMA_INT_MASK; |
755 | int src_datawidth, dst_datawidth, src_step, dst_step; |
756 | u32 temp, fix_mode = 0, fix_en = 0; |
757 | phys_addr_t llist_ptr; |
758 | |
759 | if (dir == DMA_MEM_TO_DEV) { |
		src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
		if (src_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid source step\n");
763 | return src_step; |
764 | } |
765 | |
766 | /* |
		 * For a 2-stage transfer, the destination channel step cannot
		 * be 0, since the destination device is the AON IRAM.
769 | */ |
770 | if (chn_mode == SPRD_DMA_DST_CHN0 || |
771 | chn_mode == SPRD_DMA_DST_CHN1) |
772 | dst_step = src_step; |
773 | else |
774 | dst_step = SPRD_DMA_NONE_STEP; |
775 | } else { |
		dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
		if (dst_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid destination step\n");
779 | return dst_step; |
780 | } |
781 | src_step = SPRD_DMA_NONE_STEP; |
782 | } |
783 | |
	src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
	if (src_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
787 | return src_datawidth; |
788 | } |
789 | |
	dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
	if (dst_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
793 | return dst_datawidth; |
794 | } |
795 | |
796 | hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET; |
797 | |
798 | /* |
	 * wrap_ptr and wrap_to hold the high 4 bits of the source and
	 * destination addresses.
801 | */ |
802 | hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK; |
803 | hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK; |
804 | hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK; |
805 | hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK; |
806 | |
807 | /* |
	 * Fixed-address mode can only be enabled when exactly one of the
	 * source and destination steps is zero; if both are zero or both
	 * are non-zero, leave it disabled.
811 | */ |
812 | if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) { |
813 | fix_en = 0; |
814 | } else { |
815 | fix_en = 1; |
816 | if (src_step) |
817 | fix_mode = 1; |
818 | else |
819 | fix_mode = 0; |
820 | } |
821 | |
822 | hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN; |
823 | |
824 | temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET; |
825 | temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET; |
826 | temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET; |
827 | temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET; |
828 | temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET; |
829 | temp |= schan->linklist.wrap_addr ? |
830 | SPRD_DMA_WRAP_EN | SPRD_DMA_WRAP_SEL_DEST : 0; |
831 | temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK; |
832 | hw->frg_len = temp; |
833 | |
834 | hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK; |
835 | hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK; |
836 | |
837 | temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET; |
838 | temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET; |
839 | hw->trsf_step = temp; |
840 | |
841 | /* link-list configuration */ |
842 | if (schan->linklist.phy_addr) { |
843 | hw->cfg |= SPRD_DMA_LINKLIST_EN; |
844 | |
845 | /* link-list index */ |
846 | temp = sglen ? (sg_index + 1) % sglen : 0; |
847 | |
848 | /* Next link-list configuration's physical address offset */ |
849 | temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR; |
850 | /* |
851 | * Set the link-list pointer point to next link-list |
852 | * configuration's physical address. |
853 | */ |
854 | llist_ptr = schan->linklist.phy_addr + temp; |
855 | hw->llist_ptr = lower_32_bits(llist_ptr); |
856 | hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) & |
857 | SPRD_DMA_LLIST_HIGH_MASK; |
858 | |
859 | if (schan->linklist.wrap_addr) { |
860 | hw->wrap_ptr |= schan->linklist.wrap_addr & |
861 | SPRD_DMA_WRAP_ADDR_MASK; |
862 | hw->wrap_to |= dst & SPRD_DMA_WRAP_ADDR_MASK; |
863 | } |
864 | } else { |
865 | hw->llist_ptr = 0; |
866 | hw->src_blk_step = 0; |
867 | } |
868 | |
869 | hw->frg_step = 0; |
870 | hw->des_blk_step = 0; |
871 | return 0; |
872 | } |
873 | |
874 | static int sprd_dma_fill_linklist_desc(struct dma_chan *chan, |
875 | unsigned int sglen, int sg_index, |
876 | dma_addr_t src, dma_addr_t dst, u32 len, |
877 | enum dma_transfer_direction dir, |
878 | unsigned long flags, |
879 | struct dma_slave_config *slave_cfg) |
880 | { |
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
882 | struct sprd_dma_chn_hw *hw; |
883 | |
884 | if (!schan->linklist.virt_addr) |
885 | return -EINVAL; |
886 | |
887 | hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr + |
888 | sg_index * sizeof(*hw)); |
889 | |
890 | return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len, |
891 | dir, flags, slave_cfg); |
892 | } |
893 | |
894 | static struct dma_async_tx_descriptor * |
895 | sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
896 | size_t len, unsigned long flags) |
897 | { |
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
899 | struct sprd_dma_desc *sdesc; |
900 | struct sprd_dma_chn_hw *hw; |
901 | enum sprd_dma_datawidth datawidth; |
902 | u32 step, temp; |
903 | |
	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
905 | if (!sdesc) |
906 | return NULL; |
907 | |
908 | hw = &sdesc->chn_hw; |
909 | |
910 | hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET; |
911 | hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN; |
912 | hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK; |
913 | hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK; |
914 | hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & |
915 | SPRD_DMA_HIGH_ADDR_MASK; |
916 | hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) & |
917 | SPRD_DMA_HIGH_ADDR_MASK; |
918 | |
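	/* Use the widest data width and address step that divide the length. */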
919 | if (IS_ALIGNED(len, 8)) { |
920 | datawidth = SPRD_DMA_DATAWIDTH_8_BYTES; |
921 | step = SPRD_DMA_DWORD_STEP; |
922 | } else if (IS_ALIGNED(len, 4)) { |
923 | datawidth = SPRD_DMA_DATAWIDTH_4_BYTES; |
924 | step = SPRD_DMA_WORD_STEP; |
925 | } else if (IS_ALIGNED(len, 2)) { |
926 | datawidth = SPRD_DMA_DATAWIDTH_2_BYTES; |
927 | step = SPRD_DMA_SHORT_STEP; |
928 | } else { |
929 | datawidth = SPRD_DMA_DATAWIDTH_1_BYTE; |
930 | step = SPRD_DMA_BYTE_STEP; |
931 | } |
932 | |
933 | temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET; |
934 | temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET; |
935 | temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET; |
936 | temp |= len & SPRD_DMA_FRG_LEN_MASK; |
937 | hw->frg_len = temp; |
938 | |
939 | hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK; |
940 | hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK; |
941 | |
942 | temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET; |
943 | temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET; |
944 | hw->trsf_step = temp; |
945 | |
	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
947 | } |
948 | |
949 | static struct dma_async_tx_descriptor * |
950 | sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
951 | unsigned int sglen, enum dma_transfer_direction dir, |
952 | unsigned long flags, void *context) |
953 | { |
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
955 | struct dma_slave_config *slave_cfg = &schan->slave_cfg; |
956 | dma_addr_t src = 0, dst = 0; |
957 | dma_addr_t start_src = 0, start_dst = 0; |
958 | struct sprd_dma_desc *sdesc; |
959 | struct scatterlist *sg; |
960 | u32 len = 0; |
961 | int ret, i; |
962 | |
	if (!is_slave_direction(dir))
964 | return NULL; |
965 | |
966 | if (context) { |
967 | struct sprd_dma_linklist *ll_cfg = |
968 | (struct sprd_dma_linklist *)context; |
969 | |
970 | schan->linklist.phy_addr = ll_cfg->phy_addr; |
971 | schan->linklist.virt_addr = ll_cfg->virt_addr; |
972 | schan->linklist.wrap_addr = ll_cfg->wrap_addr; |
973 | } else { |
974 | schan->linklist.phy_addr = 0; |
975 | schan->linklist.virt_addr = 0; |
976 | schan->linklist.wrap_addr = 0; |
977 | } |
978 | |
979 | /* |
980 | * Set channel mode, interrupt mode and trigger mode for 2-stage |
981 | * transfer. |
982 | */ |
983 | schan->chn_mode = |
984 | (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK; |
985 | schan->trg_mode = |
986 | (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK; |
987 | schan->int_type = flags & SPRD_DMA_INT_TYPE_MASK; |
988 | |
	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
990 | if (!sdesc) |
991 | return NULL; |
992 | |
993 | sdesc->dir = dir; |
994 | |
995 | for_each_sg(sgl, sg, sglen, i) { |
996 | len = sg_dma_len(sg); |
997 | |
998 | if (dir == DMA_MEM_TO_DEV) { |
999 | src = sg_dma_address(sg); |
1000 | dst = slave_cfg->dst_addr; |
1001 | } else { |
1002 | src = slave_cfg->src_addr; |
1003 | dst = sg_dma_address(sg); |
1004 | } |
1005 | |
1006 | if (!i) { |
1007 | start_src = src; |
1008 | start_dst = dst; |
1009 | } |
1010 | |
1011 | /* |
1012 | * The link-list mode needs at least 2 link-list |
1013 | * configurations. If there is only one sg, it doesn't |
1014 | * need to fill the link-list configuration. |
1015 | */ |
1016 | if (sglen < 2) |
1017 | break; |
1018 | |
		ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
						  dir, flags, slave_cfg);
		if (ret) {
			kfree(sdesc);
1023 | return NULL; |
1024 | } |
1025 | } |
1026 | |
	ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
				 start_dst, len, dir, flags, slave_cfg);
	if (ret) {
		kfree(sdesc);
1031 | return NULL; |
1032 | } |
1033 | |
	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
1035 | } |
1036 | |
1037 | static int sprd_dma_slave_config(struct dma_chan *chan, |
1038 | struct dma_slave_config *config) |
1039 | { |
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
1041 | struct dma_slave_config *slave_cfg = &schan->slave_cfg; |
1042 | |
1043 | memcpy(slave_cfg, config, sizeof(*config)); |
1044 | return 0; |
1045 | } |
1046 | |
1047 | static int sprd_dma_pause(struct dma_chan *chan) |
1048 | { |
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
1050 | unsigned long flags; |
1051 | |
1052 | spin_lock_irqsave(&schan->vc.lock, flags); |
	sprd_dma_pause_resume(schan, true);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
1055 | |
1056 | return 0; |
1057 | } |
1058 | |
1059 | static int sprd_dma_resume(struct dma_chan *chan) |
1060 | { |
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
1062 | unsigned long flags; |
1063 | |
1064 | spin_lock_irqsave(&schan->vc.lock, flags); |
	sprd_dma_pause_resume(schan, false);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
1067 | |
1068 | return 0; |
1069 | } |
1070 | |
1071 | static int sprd_dma_terminate_all(struct dma_chan *chan) |
1072 | { |
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
1074 | struct virt_dma_desc *cur_vd = NULL; |
1075 | unsigned long flags; |
1076 | LIST_HEAD(head); |
1077 | |
1078 | spin_lock_irqsave(&schan->vc.lock, flags); |
1079 | if (schan->cur_desc) |
1080 | cur_vd = &schan->cur_desc->vd; |
1081 | |
1082 | sprd_dma_stop(schan); |
1083 | |
	vchan_get_all_descriptors(&schan->vc, &head);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	if (cur_vd)
		sprd_dma_free_desc(cur_vd);

	vchan_dma_desc_free_list(&schan->vc, &head);
1091 | return 0; |
1092 | } |
1093 | |
1094 | static void sprd_dma_free_desc(struct virt_dma_desc *vd) |
1095 | { |
1096 | struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd); |
1097 | |
	kfree(sdesc);
1099 | } |
1100 | |
1101 | static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param) |
1102 | { |
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
1104 | u32 slave_id = *(u32 *)param; |
1105 | |
1106 | schan->dev_id = slave_id; |
1107 | return true; |
1108 | } |
1109 | |
1110 | static int sprd_dma_probe(struct platform_device *pdev) |
1111 | { |
1112 | struct device_node *np = pdev->dev.of_node; |
1113 | struct sprd_dma_dev *sdev; |
1114 | struct sprd_dma_chn *dma_chn; |
1115 | u32 chn_count; |
1116 | int ret, i; |
1117 | |
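	/*
	 * The controller can generate 36-bit addresses; fall back to a 32-bit
	 * mask if the platform cannot support that.
	 */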
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev, "unable to set coherent mask to 32\n");
1123 | return ret; |
1124 | } |
1125 | } |
1126 | |
1127 | /* Parse new and deprecated dma-channels properties */ |
	ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count);
	if (ret)
		ret = device_property_read_u32(&pdev->dev, "#dma-channels",
					       &chn_count);
	if (ret) {
		dev_err(&pdev->dev, "get dma channels count failed\n");
1134 | return ret; |
1135 | } |
1136 | |
	sdev = devm_kzalloc(&pdev->dev,
1138 | struct_size(sdev, channels, chn_count), |
1139 | GFP_KERNEL); |
1140 | if (!sdev) |
1141 | return -ENOMEM; |
1142 | |
	sdev->clk = devm_clk_get(&pdev->dev, "enable");
	if (IS_ERR(sdev->clk)) {
		dev_err(&pdev->dev, "get enable clock failed\n");
		return PTR_ERR(sdev->clk);
1147 | } |
1148 | |
1149 | /* ashb clock is optional for AGCP DMA */ |
	sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
	if (IS_ERR(sdev->ashb_clk))
		dev_warn(&pdev->dev, "no optional ashb eb clock\n");
1153 | |
1154 | /* |
	 * There are three DMA controllers: AP DMA, AON DMA and AGCP DMA. The
	 * AGCP DMA controller may run without requesting its irq, in which
	 * case DMA interrupts cannot resume the system and power is saved.
	 * The interrupts property is therefore optional.
1160 | */ |
1161 | sdev->irq = platform_get_irq(pdev, 0); |
1162 | if (sdev->irq > 0) { |
		ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
				       0, "sprd_dma", (void *)sdev);
		if (ret < 0) {
			dev_err(&pdev->dev, "request dma irq failed\n");
1167 | return ret; |
1168 | } |
1169 | } else { |
		dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
1171 | } |
1172 | |
	sdev->glb_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdev->glb_base))
		return PTR_ERR(sdev->glb_base);
1176 | |
1177 | dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask); |
1178 | sdev->total_chns = chn_count; |
	INIT_LIST_HEAD(&sdev->dma_dev.channels);
	INIT_LIST_HEAD(&sdev->dma_dev.global_node);
1181 | sdev->dma_dev.dev = &pdev->dev; |
1182 | sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources; |
1183 | sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources; |
1184 | sdev->dma_dev.device_tx_status = sprd_dma_tx_status; |
1185 | sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending; |
1186 | sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy; |
1187 | sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg; |
1188 | sdev->dma_dev.device_config = sprd_dma_slave_config; |
1189 | sdev->dma_dev.device_pause = sprd_dma_pause; |
1190 | sdev->dma_dev.device_resume = sprd_dma_resume; |
1191 | sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all; |
1192 | |
1193 | for (i = 0; i < chn_count; i++) { |
1194 | dma_chn = &sdev->channels[i]; |
1195 | dma_chn->chn_num = i; |
1196 | dma_chn->cur_desc = NULL; |
1197 | /* get each channel's registers base address. */ |
1198 | dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET + |
1199 | SPRD_DMA_CHN_REG_LENGTH * i; |
1200 | |
1201 | dma_chn->vc.desc_free = sprd_dma_free_desc; |
		vchan_init(&dma_chn->vc, &sdev->dma_dev);
1203 | } |
1204 | |
	platform_set_drvdata(pdev, sdev);
1206 | ret = sprd_dma_enable(sdev); |
1207 | if (ret) |
1208 | return ret; |
1209 | |
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
1214 | if (ret < 0) |
1215 | goto err_rpm; |
1216 | |
	ret = dma_async_device_register(&sdev->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
1220 | goto err_register; |
1221 | } |
1222 | |
1223 | sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask; |
	ret = of_dma_controller_register(np, of_dma_simple_xlate,
					 &sprd_dma_info);
1226 | if (ret) |
1227 | goto err_of_register; |
1228 | |
	pm_runtime_put(&pdev->dev);
1230 | return 0; |
1231 | |
1232 | err_of_register: |
	dma_async_device_unregister(&sdev->dma_dev);
err_register:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
1237 | err_rpm: |
1238 | sprd_dma_disable(sdev); |
1239 | return ret; |
1240 | } |
1241 | |
1242 | static void sprd_dma_remove(struct platform_device *pdev) |
1243 | { |
1244 | struct sprd_dma_dev *sdev = platform_get_drvdata(pdev); |
1245 | struct sprd_dma_chn *c, *cn; |
1246 | |
	pm_runtime_get_sync(&pdev->dev);
1248 | |
1249 | /* explicitly free the irq */ |
1250 | if (sdev->irq > 0) |
		devm_free_irq(&pdev->dev, sdev->irq, sdev);
1252 | |
1253 | list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels, |
1254 | vc.chan.device_node) { |
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
1257 | } |
1258 | |
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdev->dma_dev);
	sprd_dma_disable(sdev);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
1265 | } |
1266 | |
1267 | static const struct of_device_id sprd_dma_match[] = { |
	{ .compatible = "sprd,sc9860-dma", },
1269 | {}, |
1270 | }; |
1271 | MODULE_DEVICE_TABLE(of, sprd_dma_match); |
1272 | |
1273 | static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev) |
1274 | { |
1275 | struct sprd_dma_dev *sdev = dev_get_drvdata(dev); |
1276 | |
1277 | sprd_dma_disable(sdev); |
1278 | return 0; |
1279 | } |
1280 | |
1281 | static int __maybe_unused sprd_dma_runtime_resume(struct device *dev) |
1282 | { |
1283 | struct sprd_dma_dev *sdev = dev_get_drvdata(dev); |
1284 | int ret; |
1285 | |
1286 | ret = sprd_dma_enable(sdev); |
1287 | if (ret) |
		dev_err(sdev->dma_dev.dev, "enable dma failed\n");
1289 | |
1290 | return ret; |
1291 | } |
1292 | |
1293 | static const struct dev_pm_ops sprd_dma_pm_ops = { |
1294 | SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend, |
1295 | sprd_dma_runtime_resume, |
1296 | NULL) |
1297 | }; |
1298 | |
1299 | static struct platform_driver sprd_dma_driver = { |
1300 | .probe = sprd_dma_probe, |
1301 | .remove_new = sprd_dma_remove, |
1302 | .driver = { |
		.name = "sprd-dma",
1304 | .of_match_table = sprd_dma_match, |
1305 | .pm = &sprd_dma_pm_ops, |
1306 | }, |
1307 | }; |
1308 | module_platform_driver(sprd_dma_driver); |
1309 | |
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_AUTHOR("Eric Long <eric.long@spreadtrum.com>");
MODULE_ALIAS("platform:sprd-dma");
1315 | |