// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 - 2015 Linaro Ltd.
 * Copyright (c) 2013 HiSilicon Limited.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"k3-dma"
#define DMA_MAX_SIZE		0x1ffc
#define DMA_CYCLIC_MAX_PERIOD	0x1000
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

#define INT_STAT		0x00
#define INT_TC1			0x04
#define INT_TC2			0x08
#define INT_ERR1		0x0c
#define INT_ERR2		0x10
#define INT_TC1_MASK		0x18
#define INT_TC2_MASK		0x1c
#define INT_ERR1_MASK		0x20
#define INT_ERR2_MASK		0x24
#define INT_TC1_RAW		0x600
#define INT_TC2_RAW		0x608
#define INT_ERR1_RAW		0x610
#define INT_ERR2_RAW		0x618
#define CH_PRI			0x688
#define CH_STAT			0x690
#define CX_CUR_CNT		0x704
#define CX_LLI			0x800
#define CX_CNT1			0x80c
#define CX_CNT0			0x810
#define CX_SRC			0x814
#define CX_DST			0x818
#define CX_CFG			0x81c

#define CX_LLI_CHAIN_EN		0x2
#define CX_CFG_EN		0x1
#define CX_CFG_NODEIRQ		BIT(1)
#define CX_CFG_MEM2PER		(0x1 << 2)
#define CX_CFG_PER2MEM		(0x2 << 2)
#define CX_CFG_SRCINCR		(0x1 << 31)
#define CX_CFG_DSTINCR		(0x1 << 30)

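/*
 * Hardware link-list item (LLI). The layout mirrors the per-channel
 * CX_LLI/CX_CNT0/CX_SRC/CX_DST/CX_CFG registers, so an item can be
 * programmed straight into a physical channel. Items are chained via
 * the 'lli' field, whose low bits carry the CX_LLI_CHAIN_EN flag;
 * the 32-byte alignment keeps those bits free for that purpose.
 */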
struct k3_desc_hw {
	u32 lli;
	u32 reserved[3];
	u32 count;
	u32 saddr;
	u32 daddr;
	u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
	struct virt_dma_desc	vd;
	dma_addr_t		desc_hw_lli;
	size_t			desc_num;
	size_t			size;
	struct k3_desc_hw	*desc_hw;
};

struct k3_dma_phy;

struct k3_dma_chan {
	u32			ccfg;
	struct virt_dma_chan	vc;
	struct k3_dma_phy	*phy;
	struct list_head	node;
	dma_addr_t		dev_addr;
	enum dma_status		status;
	bool			cyclic;
	struct dma_slave_config	slave_config;
};

struct k3_dma_phy {
	u32			idx;
	void __iomem		*base;
	struct k3_dma_chan	*vchan;
	struct k3_dma_desc_sw	*ds_run;
	struct k3_dma_desc_sw	*ds_done;
};

struct k3_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct tasklet_struct	task;
	spinlock_t		lock;
	struct list_head	chan_pending;
	struct k3_dma_phy	*phy;
	struct k3_dma_chan	*chans;
	struct clk		*clk;
	struct dma_pool		*pool;
	u32			dma_channels;
	u32			dma_requests;
	u32			dma_channel_mask;
	unsigned int		irq;
};

#define K3_FLAG_NOCLK	BIT(1)

struct k3dma_soc_data {
	unsigned long flags;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg);

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
	return container_of(chan, struct k3_dma_chan, vc.chan);
}

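/* Gate a physical channel on or off via the CX_CFG enable bit. */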
static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
	u32 val = 0;

	if (on) {
		val = readl_relaxed(phy->base + CX_CFG);
		val |= CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	} else {
		val = readl_relaxed(phy->base + CX_CFG);
		val &= ~CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	}
}

static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
	u32 val = 0;

	k3_dma_pause_dma(phy, false);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + INT_TC1_RAW);
	writel_relaxed(val, d->base + INT_TC2_RAW);
	writel_relaxed(val, d->base + INT_ERR1_RAW);
	writel_relaxed(val, d->base + INT_ERR2_RAW);
}

static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
	writel_relaxed(hw->lli, phy->base + CX_LLI);
	writel_relaxed(hw->count, phy->base + CX_CNT0);
	writel_relaxed(hw->saddr, phy->base + CX_SRC);
	writel_relaxed(hw->daddr, phy->base + CX_DST);
	writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
	u32 cnt = 0;

	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
	cnt &= 0xffff;
	return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
	return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
	return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
	if (on) {
		/* set same priority */
		writel_relaxed(0x0, d->base + CH_PRI);

		/* unmask irq */
		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
		writel_relaxed(0xffff, d->base + INT_TC2_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
	} else {
		/* mask irq */
		writel_relaxed(0x0, d->base + INT_TC1_MASK);
		writel_relaxed(0x0, d->base + INT_TC2_MASK);
		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
	}
}

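/*
 * Interrupt handler. TC1 fires when a channel reaches the end of its
 * LLI chain (descriptor complete); TC2 fires on items carrying
 * CX_CFG_NODEIRQ, which this driver sets on cyclic period boundaries.
 * Errors are only reported with a warning; the transfer itself is not
 * torn down here.
 */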
static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c;
	u32 stat = readl_relaxed(d->base + INT_STAT);
	u32 tc1 = readl_relaxed(d->base + INT_TC1);
	u32 tc2 = readl_relaxed(d->base + INT_TC2);
	u32 err1 = readl_relaxed(d->base + INT_ERR1);
	u32 err2 = readl_relaxed(d->base + INT_ERR2);
	u32 i, irq_chan = 0;

	while (stat) {
		i = __ffs(stat);
		stat &= ~BIT(i);
		if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {

			p = &d->phy[i];
			c = p->vchan;
			if (c && (tc1 & BIT(i))) {
				spin_lock(&c->vc.lock);
				if (p->ds_run != NULL) {
					vchan_cookie_complete(&p->ds_run->vd);
					p->ds_done = p->ds_run;
					p->ds_run = NULL;
				}
				spin_unlock(&c->vc.lock);
			}
			if (c && (tc2 & BIT(i))) {
				spin_lock(&c->vc.lock);
				if (p->ds_run != NULL)
					vchan_cyclic_callback(&p->ds_run->vd);
				spin_unlock(&c->vc.lock);
			}
			irq_chan |= BIT(i);
		}
		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
			dev_warn(d->slave.dev, "DMA ERR\n");
	}

	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
	writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
	writel_relaxed(err1, d->base + INT_ERR1_RAW);
	writel_relaxed(err2, d->base + INT_ERR2_RAW);

	if (irq_chan)
		tasklet_schedule(&d->task);

	if (irq_chan || err1 || err2)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

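/*
 * Push the next issued descriptor to the physical channel bound to
 * this virtual channel. Returns -EAGAIN if no physical channel is
 * bound, the hardware channel is still busy, a descriptor is already
 * in flight, or nothing has been issued.
 */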
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
		return -EAGAIN;

	/* Avoid losing track of ds_run if a transaction is in flight */
	if (c->phy->ds_run)
		return -EAGAIN;

	if (vd) {
		struct k3_dma_desc_sw *ds =
			container_of(vd, struct k3_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);

		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
		return 0;
	}
	c->phy->ds_run = NULL;
	c->phy->ds_done = NULL;
	return -EAGAIN;
}

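/*
 * Deferred scheduler, kicked by the interrupt handler and by
 * issue_pending: restart channels that completed a descriptor, free
 * physical channels that ran dry, then bind idle physical channels
 * to virtual channels waiting on d->chan_pending and start them.
 */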
static void k3_dma_tasklet(struct tasklet_struct *t)
{
	struct k3_dma_dev *d = from_tasklet(d, t, task);
	struct k3_dma_phy *p;
	struct k3_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && p->ds_done) {
			if (k3_dma_start_txd(c)) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irq(&d->lock);
	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
					     struct k3_dma_chan, node);
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << pch;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irq(&c->vc.lock);
				k3_dma_start_txd(c);
				spin_unlock_irq(&c->vc.lock);
			}
		}
	}
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}

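/*
 * Report transfer status and residue. A descriptor still sitting on
 * the issued list counts in full; for the running descriptor the
 * residue is the hardware's current count plus the byte counts of
 * all LLI items the channel has not fetched yet.
 */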
static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd && !c->cyclic) {
		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct k3_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = k3_dma_get_curr_cnt(d, p);
		clli = k3_dma_get_curr_lli(p);
		index = ((clli - ds->desc_hw_lli) /
			sizeof(struct k3_desc_hw)) + 1;
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].count;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

static void k3_dma_issue_pending(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy) {
			if (list_empty(&c->node)) {
				/* if new channel, add chan_pending */
				list_add_tail(&c->node, &d->chan_pending);
				/* check in tasklet */
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

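/*
 * Fill LLI item 'num' of a software descriptor. Every item but the
 * last is chained to its successor in the pool with CX_LLI_CHAIN_EN
 * set; callers then terminate the chain (or loop it, for cyclic) by
 * patching the final item's 'lli' field.
 */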
static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
			     dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if (num != ds->desc_num - 1)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct k3_desc_hw);

	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
	ds->desc_hw[num].count = len;
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].config = ccfg;
}

static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
							 struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceeds max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	ds->desc_num = num;
	return ds;
}

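/*
 * Memory-to-memory copy. Transfers larger than DMA_MAX_SIZE are split
 * over several LLI items; if no device_config was issued beforehand,
 * the channel falls back to 64-bit width, burst of 16 and both
 * addresses incrementing.
 */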
static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	ds->size = len;
	num = 0;

	if (!c->ccfg) {
		/* default is memtomem, without calling device_config */
		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
	}

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		src += copy;
		dst += copy;
		len -= copy;
	} while (len);

	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (sgl == NULL)
		return NULL;

	c->cyclic = 0;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

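/*
 * Cyclic (audio-style) transfer. The buffer is cut into LLI items of
 * at most one period each; items ending a period get CX_CFG_NODEIRQ
 * so TC2 can raise the period callback, and the last item links back
 * to the first instead of terminating the chain.
 */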
static struct dma_async_tx_descriptor *
k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
		       size_t buf_len, size_t period_len,
		       enum dma_transfer_direction dir,
		       unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	dma_addr_t addr, src = 0, dst = 0;
	int num = 1, since = 0;
	size_t modulo = DMA_CYCLIC_MAX_PERIOD;
	u32 en_tc2 = 0;

	dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
		__func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
		buf_len, period_len, (int)dir);

	avail = buf_len;
	if (avail > modulo)
		num += DIV_ROUND_UP(avail, modulo) - 1;

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 1;
	addr = buf_addr;
	avail = buf_len;
	total = avail;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	if (period_len < modulo)
		modulo = period_len;

	do {
		len = min_t(size_t, avail, modulo);

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = addr;
		}
		since += len;
		if (since >= period_len) {
			/* descriptor asks for TC2 interrupt on completion */
			en_tc2 = CX_CFG_NODEIRQ;
			since -= period_len;
		} else
			en_tc2 = 0;

		k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);

		addr += len;
		avail -= len;
	} while (avail);

	/* "Cyclic" == end of link points back to start of link */
	ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;

	ds->size = total;

	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static int k3_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	memcpy(&c->slave_config, cfg, sizeof(*cfg));

	return 0;
}

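/*
 * Translate the cached dma_slave_config into a CX_CFG value for one
 * direction: transfer direction and address-increment bits, bus width
 * (bits 12-17), burst length (bits 20-27), and the peripheral request
 * line taken from the channel id.
 */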
static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	u32 maxburst = 0, val = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (dir == DMA_DEV_TO_MEM) {
		c->ccfg = CX_CFG_DSTINCR | CX_CFG_PER2MEM | CX_CFG_EN;
		c->dev_addr = cfg->src_addr;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
	} else if (dir == DMA_MEM_TO_DEV) {
		c->ccfg = CX_CFG_SRCINCR | CX_CFG_MEM2PER | CX_CFG_EN;
		c->dev_addr = cfg->dst_addr;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
	}
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		val = __ffs(width);
		break;
	default:
		val = 3;
		break;
	}
	c->ccfg |= (val << 12) | (val << 16);

	if ((maxburst == 0) || (maxburst > 16))
		val = 15;
	else
		val = maxburst - 1;
	c->ccfg |= (val << 20) | (val << 24);

	/* specific request line */
	c->ccfg |= c->vc.chan.chan_id << 4;

	return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
	struct k3_dma_desc_sw *ds =
		container_of(vd, struct k3_dma_desc_sw, vd);
	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

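/*
 * Abort all work on a virtual channel: drop it from the pending list,
 * stop and release its physical channel if one is bound, and free
 * every queued descriptor through the virt-dma helpers.
 */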
static int k3_dma_terminate_all(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		k3_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		if (p->ds_run) {
			vchan_terminate_vdesc(&p->ds_run->vd);
			p->ds_run = NULL;
		}
		p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void k3_dma_synchronize(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	vchan_synchronize(&c->vc);
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;
		if (p) {
			k3_dma_pause_dma(p, false);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}

	return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;
		if (p) {
			k3_dma_pause_dma(p, true);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static const struct k3dma_soc_data k3_v1_dma_data = {
	.flags = 0,
};

static const struct k3dma_soc_data asp_v1_dma_data = {
	.flags = K3_FLAG_NOCLK,
};

static const struct of_device_id k3_pdma_dt_ids[] = {
	{ .compatible = "hisilicon,k3-dma-1.0",
	  .data = &k3_v1_dma_data
	},
	{ .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
	  .data = &asp_v1_dma_data
	},
	{}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

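/*
 * The single cell of the DT dma specifier is the peripheral request
 * line, which doubles as the index of the virtual channel to hand
 * out.
 */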
static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
					       struct of_dma *ofdma)
{
	struct k3_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request >= d->dma_requests)
		return NULL;

	return dma_get_slave_channel(&(d->chans[request].vc.chan));
}

static int k3_dma_probe(struct platform_device *op)
{
	const struct k3dma_soc_data *soc_data;
	struct k3_dma_dev *d;
	int i, ret, irq = 0;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	soc_data = device_get_match_data(&op->dev);
	if (!soc_data)
		return -EINVAL;

	d->base = devm_platform_ioremap_resource(op, 0);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_property_read_u32((&op->dev)->of_node,
			     "dma-channels", &d->dma_channels);
	of_property_read_u32((&op->dev)->of_node,
			     "dma-requests", &d->dma_requests);
	ret = of_property_read_u32((&op->dev)->of_node,
				   "dma-channel-mask", &d->dma_channel_mask);
	if (ret) {
		dev_warn(&op->dev,
			 "dma-channel-mask doesn't exist, considering all as available.\n");
		d->dma_channel_mask = (u32)~0UL;
	}

	if (!(soc_data->flags & K3_FLAG_NOCLK)) {
		d->clk = devm_clk_get(&op->dev, NULL);
		if (IS_ERR(d->clk)) {
			dev_err(&op->dev, "no dma clk\n");
			return PTR_ERR(d->clk);
		}
	}

	irq = platform_get_irq(op, 0);
	ret = devm_request_irq(&op->dev, irq,
			       k3_dma_int_handler, 0, DRIVER_NAME, d);
	if (ret)
		return ret;

	d->irq = irq;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
				   LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kcalloc(&op->dev,
			      d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
	if (d->phy == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct k3_dma_phy *p;

		if (!(d->dma_channel_mask & BIT(i)))
			continue;

		p = &d->phy[i];
		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
	d->slave.device_tx_status = k3_dma_tx_status;
	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
	d->slave.device_issue_pending = k3_dma_issue_pending;
	d->slave.device_config = k3_dma_config;
	d->slave.device_pause = k3_dma_transfer_pause;
	d->slave.device_resume = k3_dma_transfer_resume;
	d->slave.device_terminate_all = k3_dma_terminate_all;
	d->slave.device_synchronize = k3_dma_synchronize;
	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

	/* init virtual channel */
	d->chans = devm_kcalloc(&op->dev,
				d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
	if (d->chans == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct k3_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = k3_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	k3_dma_enable_dma(d, true);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto dma_async_register_fail;

	ret = of_dma_controller_register((&op->dev)->of_node,
					 k3_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	tasklet_setup(&d->task, k3_dma_tasklet);
	platform_set_drvdata(op, d);
	dev_info(&op->dev, "initialized\n");

	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
dma_async_register_fail:
	clk_disable_unprepare(d->clk);
	return ret;
}

static void k3_dma_remove(struct platform_device *op)
{
	struct k3_dma_chan *c, *cn;
	struct k3_dma_dev *d = platform_get_drvdata(op);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free((&op->dev)->of_node);

	devm_free_irq(&op->dev, d->irq, d);

	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
	tasklet_kill(&d->task);
	clk_disable_unprepare(d->clk);
}

#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = k3_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			 "chan %d is running, failing to suspend\n", stat);
		return -1;
	}
	k3_dma_enable_dma(d, false);
	clk_disable_unprepare(d->clk);
	return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	k3_dma_enable_dma(d, true);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &k3_dma_pmops,
		.of_match_table = k3_pdma_dt_ids,
	},
	.probe		= k3_dma_probe,
	.remove_new	= k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("HiSilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");