1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
 * Dmaengine driver base library for DMA controllers found on SH-based SoCs
4 | * |
5 | * extracted from shdma.c |
6 | * |
7 | * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> |
8 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> |
9 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. |
10 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. |
11 | */ |
12 | |
13 | #include <linux/delay.h> |
14 | #include <linux/shdma-base.h> |
15 | #include <linux/dmaengine.h> |
16 | #include <linux/init.h> |
17 | #include <linux/interrupt.h> |
18 | #include <linux/module.h> |
19 | #include <linux/pm_runtime.h> |
20 | #include <linux/slab.h> |
21 | #include <linux/spinlock.h> |
22 | |
23 | #include "../dmaengine.h" |
24 | |
25 | /* DMA descriptor control */ |
26 | enum shdma_desc_status { |
27 | DESC_IDLE, |
28 | DESC_PREPARED, |
29 | DESC_SUBMITTED, |
30 | DESC_COMPLETED, /* completed, have to call callback */ |
31 | DESC_WAITING, /* callback called, waiting for ack / re-submit */ |
32 | }; |
33 | |
34 | #define NR_DESCS_PER_CHANNEL 32 |
35 | |
36 | #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) |
37 | #define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev) |
38 | |
39 | /* |
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
44 | */ |
45 | static unsigned int slave_num = 256; |
46 | module_param(slave_num, uint, 0444); |
47 | |
48 | /* A bitmask with slave_num bits */ |
49 | static unsigned long *shdma_slave_used; |
50 | |
/* Called under spin_lock_irq(&schan->chan_lock) */
52 | static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan) |
53 | { |
54 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); |
55 | const struct shdma_ops *ops = sdev->ops; |
56 | struct shdma_desc *sdesc; |
57 | |
58 | /* DMA work check */ |
59 | if (ops->channel_busy(schan)) |
60 | return; |
61 | |
	/* Find the first descriptor that has not been transferred yet */
63 | list_for_each_entry(sdesc, &schan->ld_queue, node) |
64 | if (sdesc->mark == DESC_SUBMITTED) { |
65 | ops->start_xfer(schan, sdesc); |
66 | break; |
67 | } |
68 | } |
69 | |
70 | static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) |
71 | { |
72 | struct shdma_desc *chunk, *c, *desc = |
73 | container_of(tx, struct shdma_desc, async_tx); |
74 | struct shdma_chan *schan = to_shdma_chan(tx->chan); |
75 | dma_async_tx_callback callback = tx->callback; |
76 | dma_cookie_t cookie; |
77 | bool power_up; |
78 | |
	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);
82 | |
83 | cookie = dma_cookie_assign(tx); |
84 | |
85 | /* Mark all chunks of this descriptor as submitted, move to the queue */ |
86 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { |
87 | /* |
		 * All chunks are on the global ld_free, so we have to find
89 | * the end of the chain ourselves |
90 | */ |
91 | if (chunk != desc && (chunk->mark == DESC_IDLE || |
92 | chunk->async_tx.cookie > 0 || |
93 | chunk->async_tx.cookie == -EBUSY || |
94 | &chunk->node == &schan->ld_free)) |
95 | break; |
96 | chunk->mark = DESC_SUBMITTED; |
97 | if (chunk->chunks == 1) { |
98 | chunk->async_tx.callback = callback; |
99 | chunk->async_tx.callback_param = tx->callback_param; |
100 | } else { |
101 | /* Callback goes to the last chunk */ |
102 | chunk->async_tx.callback = NULL; |
103 | } |
104 | chunk->cookie = cookie; |
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
109 | } |
110 | |
111 | if (power_up) { |
112 | int ret; |
113 | schan->pm_state = SHDMA_PM_BUSY; |
114 | |
		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);
124 | |
		/* Have we been reset while waiting? */
126 | if (schan->pm_state != SHDMA_PM_ESTABLISHED) { |
127 | struct shdma_dev *sdev = |
128 | to_shdma_dev(schan->dma_chan.device); |
129 | const struct shdma_ops *ops = sdev->ops; |
			dev_dbg(schan->dev, "Bring up channel %d\n",
131 | schan->id); |
132 | /* |
			 * TODO: .setup_xfer() might fail on some platforms.
134 | * Make it int then, on error remove chunks from the |
135 | * queue again |
136 | */ |
137 | ops->setup_xfer(schan, schan->slave_id); |
138 | |
139 | if (schan->pm_state == SHDMA_PM_PENDING) |
140 | shdma_chan_xfer_ld_queue(schan); |
141 | schan->pm_state = SHDMA_PM_ESTABLISHED; |
142 | } |
143 | } else { |
144 | /* |
145 | * Tell .device_issue_pending() not to run the queue, interrupts |
146 | * will do it anyway |
147 | */ |
148 | schan->pm_state = SHDMA_PM_PENDING; |
149 | } |
150 | |
	spin_unlock_irq(&schan->chan_lock);
152 | |
153 | return cookie; |
154 | } |
155 | |
/* Called with schan->chan_lock held */
157 | static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) |
158 | { |
159 | struct shdma_desc *sdesc; |
160 | |
161 | list_for_each_entry(sdesc, &schan->ld_free, node) |
162 | if (sdesc->mark != DESC_PREPARED) { |
163 | BUG_ON(sdesc->mark != DESC_IDLE); |
			list_del(&sdesc->node);
165 | return sdesc; |
166 | } |
167 | |
168 | return NULL; |
169 | } |
170 | |
171 | static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr) |
172 | { |
173 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); |
174 | const struct shdma_ops *ops = sdev->ops; |
175 | int ret, match; |
176 | |
177 | if (schan->dev->of_node) { |
178 | match = schan->hw_req; |
179 | ret = ops->set_slave(schan, match, slave_addr, true); |
180 | if (ret < 0) |
181 | return ret; |
182 | } else { |
183 | match = schan->real_slave_id; |
184 | } |
185 | |
186 | if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num) |
187 | return -EINVAL; |
188 | |
	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
190 | return -EBUSY; |
191 | |
192 | ret = ops->set_slave(schan, match, slave_addr, false); |
193 | if (ret < 0) { |
		clear_bit(schan->real_slave_id, shdma_slave_used);
195 | return ret; |
196 | } |
197 | |
198 | schan->slave_id = schan->real_slave_id; |
199 | |
200 | return 0; |
201 | } |
202 | |
203 | static int shdma_alloc_chan_resources(struct dma_chan *chan) |
204 | { |
205 | struct shdma_chan *schan = to_shdma_chan(chan); |
206 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); |
207 | const struct shdma_ops *ops = sdev->ops; |
208 | struct shdma_desc *desc; |
209 | struct shdma_slave *slave = chan->private; |
210 | int ret, i; |
211 | |
212 | /* |
213 | * This relies on the guarantee from dmaengine that alloc_chan_resources |
214 | * never runs concurrently with itself or free_chan_resources. |
215 | */ |
216 | if (slave) { |
217 | /* Legacy mode: .private is set in filter */ |
218 | schan->real_slave_id = slave->slave_id; |
		ret = shdma_setup_slave(schan, 0);
220 | if (ret < 0) |
221 | goto esetslave; |
222 | } else { |
223 | /* Normal mode: real_slave_id was set by filter */ |
224 | schan->slave_id = -EINVAL; |
225 | } |
226 | |
	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
229 | if (!schan->desc) { |
230 | ret = -ENOMEM; |
231 | goto edescalloc; |
232 | } |
233 | schan->desc_num = NR_DESCS_PER_CHANNEL; |
234 | |
235 | for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) { |
236 | desc = ops->embedded_desc(schan->desc, i); |
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
239 | desc->async_tx.tx_submit = shdma_tx_submit; |
240 | desc->mark = DESC_IDLE; |
241 | |
		list_add(&desc->node, &schan->ld_free);
243 | } |
244 | |
245 | return NR_DESCS_PER_CHANNEL; |
246 | |
247 | edescalloc: |
248 | if (slave) |
249 | esetslave: |
		clear_bit(slave->slave_id, shdma_slave_used);
251 | chan->private = NULL; |
252 | return ret; |
253 | } |
254 | |
255 | /* |
 * This is the standard shdma filter function, to be used as a replacement for
 * the "old" method of using the .private pointer.
 * You always have to pass a valid slave ID as the argument. Old drivers that
 * pass ERR_PTR(-EINVAL) as a filter parameter and set the slave ID up in
 * dma_slave_config need to be updated, so that the slave_id field can be
 * removed from dma_slave_config. If this filter is used, the slave driver,
 * after calling dma_request_channel(), will also have to call
 * dmaengine_slave_config() with .direction, and either .src_addr or .dst_addr
 * set.
264 | * |
265 | * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE |
 * capability! If this becomes a requirement, hardware glue drivers using these
 * services would have to provide their own filters, which first would check
268 | * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do |
269 | * this, and only then, in case of a match, call this common filter. |
270 | * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate(). |
271 | * In that case the MID-RID value is used for slave channel filtering and is |
272 | * passed to this function in the "arg" parameter. |
273 | */ |
274 | bool shdma_chan_filter(struct dma_chan *chan, void *arg) |
275 | { |
276 | struct shdma_chan *schan; |
277 | struct shdma_dev *sdev; |
278 | int slave_id = (long)arg; |
279 | int ret; |
280 | |
281 | /* Only support channels handled by this driver. */ |
282 | if (chan->device->device_alloc_chan_resources != |
283 | shdma_alloc_chan_resources) |
284 | return false; |
285 | |
286 | schan = to_shdma_chan(chan); |
287 | sdev = to_shdma_dev(chan->device); |
288 | |
289 | /* |
290 | * For DT, the schan->slave_id field is generated by the |
291 | * set_slave function from the slave ID that is passed in |
292 | * from xlate. For the non-DT case, the slave ID is |
293 | * directly passed into the filter function by the driver |
294 | */ |
295 | if (schan->dev->of_node) { |
296 | ret = sdev->ops->set_slave(schan, slave_id, 0, true); |
297 | if (ret < 0) |
298 | return false; |
299 | |
300 | schan->real_slave_id = schan->slave_id; |
301 | return true; |
302 | } |
303 | |
304 | if (slave_id < 0) { |
305 | /* No slave requested - arbitrary channel */ |
		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
307 | return true; |
308 | } |
309 | |
310 | if (slave_id >= slave_num) |
311 | return false; |
312 | |
313 | ret = sdev->ops->set_slave(schan, slave_id, 0, true); |
314 | if (ret < 0) |
315 | return false; |
316 | |
317 | schan->real_slave_id = slave_id; |
318 | |
319 | return true; |
320 | } |
321 | EXPORT_SYMBOL(shdma_chan_filter); |
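
/*
 * Example (illustrative sketch, not part of this driver): a slave driver
 * would typically combine shdma_chan_filter() with dma_request_channel() and
 * then configure the channel. MY_TX_SLAVE_ID is a hypothetical,
 * platform-specific slave ID.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(uintptr_t)MY_TX_SLAVE_ID);
 *	if (chan)
 *		... call dmaengine_slave_config(), see shdma_config() below ...
 */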
322 | |
323 | static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) |
324 | { |
325 | struct shdma_desc *desc, *_desc; |
326 | /* Is the "exposed" head of a chain acked? */ |
327 | bool head_acked = false; |
328 | dma_cookie_t cookie = 0; |
329 | dma_async_tx_callback callback = NULL; |
330 | struct dmaengine_desc_callback cb; |
331 | unsigned long flags; |
332 | LIST_HEAD(cyclic_list); |
333 | |
334 | memset(&cb, 0, sizeof(cb)); |
335 | spin_lock_irqsave(&schan->chan_lock, flags); |
336 | list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { |
337 | struct dma_async_tx_descriptor *tx = &desc->async_tx; |
338 | |
339 | BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); |
340 | BUG_ON(desc->mark != DESC_SUBMITTED && |
341 | desc->mark != DESC_COMPLETED && |
342 | desc->mark != DESC_WAITING); |
343 | |
344 | /* |
345 | * queue is ordered, and we use this loop to (1) clean up all |
346 | * completed descriptors, and to (2) update descriptor flags of |
347 | * any chunks in a (partially) completed chain |
348 | */ |
349 | if (!all && desc->mark == DESC_SUBMITTED && |
350 | desc->cookie != cookie) |
351 | break; |
352 | |
353 | if (tx->cookie > 0) |
354 | cookie = tx->cookie; |
355 | |
356 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { |
357 | if (schan->dma_chan.completed_cookie != desc->cookie - 1) |
358 | dev_dbg(schan->dev, |
				"Completing cookie %d, expected %d\n",
360 | desc->cookie, |
361 | schan->dma_chan.completed_cookie + 1); |
362 | schan->dma_chan.completed_cookie = desc->cookie; |
363 | } |
364 | |
365 | /* Call callback on the last chunk */ |
366 | if (desc->mark == DESC_COMPLETED && tx->callback) { |
367 | desc->mark = DESC_WAITING; |
			dmaengine_desc_get_callback(tx, &cb);
			callback = tx->callback;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
371 | tx->cookie, tx, schan->id); |
372 | BUG_ON(desc->chunks != 1); |
373 | break; |
374 | } |
375 | |
376 | if (tx->cookie > 0 || tx->cookie == -EBUSY) { |
377 | if (desc->mark == DESC_COMPLETED) { |
378 | BUG_ON(tx->cookie < 0); |
379 | desc->mark = DESC_WAITING; |
380 | } |
381 | head_acked = async_tx_test_ack(tx); |
382 | } else { |
383 | switch (desc->mark) { |
384 | case DESC_COMPLETED: |
385 | desc->mark = DESC_WAITING; |
386 | fallthrough; |
387 | case DESC_WAITING: |
388 | if (head_acked) |
					async_tx_ack(&desc->async_tx);
390 | } |
391 | } |
392 | |
		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
394 | tx, tx->cookie); |
395 | |
396 | if (((desc->mark == DESC_COMPLETED || |
397 | desc->mark == DESC_WAITING) && |
		     async_tx_test_ack(&desc->async_tx)) || all) {
399 | |
400 | if (all || !desc->cyclic) { |
401 | /* Remove from ld_queue list */ |
402 | desc->mark = DESC_IDLE; |
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
413 | schan->pm_state = SHDMA_PM_ESTABLISHED; |
414 | } else if (schan->pm_state == SHDMA_PM_PENDING) { |
415 | shdma_chan_xfer_ld_queue(schan); |
416 | } |
417 | } |
418 | } |
419 | |
420 | if (all && !callback) |
421 | /* |
422 | * Terminating and the loop completed normally: forgive |
423 | * uncompleted cookies |
424 | */ |
425 | schan->dma_chan.completed_cookie = schan->dma_chan.cookie; |
426 | |
	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);
432 | |
433 | return callback; |
434 | } |
435 | |
436 | /* |
437 | * shdma_chan_ld_cleanup - Clean up link descriptors |
438 | * |
 * Clean up the ld_queue of the DMA channel.
440 | */ |
441 | static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all) |
442 | { |
443 | while (__ld_cleanup(schan, all)) |
444 | ; |
445 | } |
446 | |
447 | /* |
448 | * shdma_free_chan_resources - Free all resources of the channel. |
449 | */ |
450 | static void shdma_free_chan_resources(struct dma_chan *chan) |
451 | { |
452 | struct shdma_chan *schan = to_shdma_chan(chan); |
453 | struct shdma_dev *sdev = to_shdma_dev(chan->device); |
454 | const struct shdma_ops *ops = sdev->ops; |
455 | LIST_HEAD(list); |
456 | |
457 | /* Protect against ISR */ |
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);
461 | |
462 | /* Now no new interrupts will occur */ |
463 | |
464 | /* Prepared and not submitted descriptors can still be on the queue */ |
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);
467 | |
468 | if (schan->slave_id >= 0) { |
469 | /* The caller is holding dma_list_mutex */ |
		clear_bit(schan->slave_id, shdma_slave_used);
471 | chan->private = NULL; |
472 | } |
473 | |
474 | schan->real_slave_id = 0; |
475 | |
	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
484 | } |
485 | |
486 | /** |
487 | * shdma_add_desc - get, set up and return one transfer descriptor |
488 | * @schan: DMA channel |
489 | * @flags: DMA transfer flags |
490 | * @dst: destination DMA address, incremented when direction equals |
491 | * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM |
492 | * @src: source DMA address, incremented when direction equals |
493 | * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM |
494 | * @len: DMA transfer length |
495 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY |
496 | * @direction: needed for slave DMA to decide which address to keep constant, |
497 | * equals DMA_MEM_TO_MEM for MEMCPY |
 * Returns the prepared descriptor on success or NULL on error
 * Locks: called with schan->chan_lock held
500 | */ |
501 | static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, |
502 | unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len, |
503 | struct shdma_desc **first, enum dma_transfer_direction direction) |
504 | { |
505 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); |
506 | const struct shdma_ops *ops = sdev->ops; |
507 | struct shdma_desc *new; |
508 | size_t copy_size = *len; |
509 | |
510 | if (!copy_size) |
511 | return NULL; |
512 | |
513 | /* Allocate the link descriptor from the free list */ |
514 | new = shdma_get_desc(schan); |
515 | if (!new) { |
		dev_err(schan->dev, "No free link descriptor available\n");
517 | return NULL; |
518 | } |
519 | |
	ops->desc_setup(schan, new, *src, *dst, &copy_size);
521 | |
522 | if (!*first) { |
523 | /* First desc */ |
524 | new->async_tx.cookie = -EBUSY; |
525 | *first = new; |
526 | } else { |
527 | /* Other desc - invisible to the user */ |
528 | new->async_tx.cookie = -EINVAL; |
529 | } |
530 | |
531 | dev_dbg(schan->dev, |
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
533 | copy_size, *len, src, dst, &new->async_tx, |
534 | new->async_tx.cookie); |
535 | |
536 | new->mark = DESC_PREPARED; |
537 | new->async_tx.flags = flags; |
538 | new->direction = direction; |
539 | new->partial = 0; |
540 | |
541 | *len -= copy_size; |
542 | if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) |
543 | *src += copy_size; |
544 | if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) |
545 | *dst += copy_size; |
546 | |
547 | return new; |
548 | } |
549 | |
550 | /* |
551 | * shdma_prep_sg - prepare transfer descriptors from an SG list |
552 | * |
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM, and the SG list contains only one element, which points at
 * the source buffer.
559 | */ |
560 | static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, |
561 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, |
562 | enum dma_transfer_direction direction, unsigned long flags, bool cyclic) |
563 | { |
564 | struct scatterlist *sg; |
565 | struct shdma_desc *first = NULL, *new = NULL /* compiler... */; |
566 | LIST_HEAD(tx_list); |
567 | int chunks = 0; |
568 | unsigned long irq_flags; |
569 | int i; |
570 | |
571 | for_each_sg(sgl, sg, sg_len, i) |
572 | chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); |
573 | |
574 | /* Have to lock the whole loop to protect against concurrent release */ |
575 | spin_lock_irqsave(&schan->chan_lock, irq_flags); |
576 | |
577 | /* |
578 | * Chaining: |
579 | * first descriptor is what user is dealing with in all API calls, its |
580 | * cookie is at first set to -EBUSY, at tx-submit to a positive |
581 | * number |
582 | * if more than one chunk is needed further chunks have cookie = -EINVAL |
583 | * the last chunk, if not equal to the first, has cookie = -ENOSPC |
584 | * all chunks are linked onto the tx_list head with their .node heads |
585 | * only during this function, then they are immediately spliced |
586 | * back onto the free list in form of a chain |
587 | */ |
588 | for_each_sg(sgl, sg, sg_len, i) { |
589 | dma_addr_t sg_addr = sg_dma_address(sg); |
590 | size_t len = sg_dma_len(sg); |
591 | |
592 | if (!len) |
593 | goto err_get_desc; |
594 | |
595 | do { |
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
597 | i, sg, len, &sg_addr); |
598 | |
599 | if (direction == DMA_DEV_TO_MEM) |
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
606 | direction); |
607 | if (!new) |
608 | goto err_get_desc; |
609 | |
610 | new->cyclic = cyclic; |
611 | if (cyclic) |
612 | new->chunks = 1; |
613 | else |
614 | new->chunks = chunks--; |
			list_add_tail(&new->node, &tx_list);
616 | } while (len); |
617 | } |
618 | |
619 | if (new != first) |
620 | new->async_tx.cookie = -ENOSPC; |
621 | |
	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
626 | |
627 | return &first->async_tx; |
628 | |
629 | err_get_desc: |
630 | list_for_each_entry(new, &tx_list, node) |
631 | new->mark = DESC_IDLE; |
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
635 | |
636 | return NULL; |
637 | } |
638 | |
639 | static struct dma_async_tx_descriptor *shdma_prep_memcpy( |
640 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, |
641 | size_t len, unsigned long flags) |
642 | { |
643 | struct shdma_chan *schan = to_shdma_chan(chan); |
644 | struct scatterlist sg; |
645 | |
646 | if (!chan || !len) |
647 | return NULL; |
648 | |
649 | BUG_ON(!schan->desc_num); |
650 | |
651 | sg_init_table(&sg, 1); |
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
659 | } |
660 | |
661 | static struct dma_async_tx_descriptor *shdma_prep_slave_sg( |
662 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, |
663 | enum dma_transfer_direction direction, unsigned long flags, void *context) |
664 | { |
665 | struct shdma_chan *schan = to_shdma_chan(chan); |
666 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); |
667 | const struct shdma_ops *ops = sdev->ops; |
668 | int slave_id = schan->slave_id; |
669 | dma_addr_t slave_addr; |
670 | |
671 | if (!chan) |
672 | return NULL; |
673 | |
674 | BUG_ON(!schan->desc_num); |
675 | |
676 | /* Someone calling slave DMA on a generic channel? */ |
677 | if (slave_id < 0 || !sg_len) { |
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
679 | __func__, sg_len, slave_id); |
680 | return NULL; |
681 | } |
682 | |
683 | slave_addr = ops->slave_addr(schan); |
684 | |
	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
687 | } |
688 | |
689 | #define SHDMA_MAX_SG_LEN 32 |
690 | |
691 | static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( |
692 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
693 | size_t period_len, enum dma_transfer_direction direction, |
694 | unsigned long flags) |
695 | { |
696 | struct shdma_chan *schan = to_shdma_chan(chan); |
697 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); |
698 | struct dma_async_tx_descriptor *desc; |
699 | const struct shdma_ops *ops = sdev->ops; |
700 | unsigned int sg_len = buf_len / period_len; |
701 | int slave_id = schan->slave_id; |
702 | dma_addr_t slave_addr; |
703 | struct scatterlist *sgl; |
704 | int i; |
705 | |
706 | if (!chan) |
707 | return NULL; |
708 | |
709 | BUG_ON(!schan->desc_num); |
710 | |
711 | if (sg_len > SHDMA_MAX_SG_LEN) { |
		dev_err(schan->dev, "sg length %d exceeds limit %d",
713 | sg_len, SHDMA_MAX_SG_LEN); |
714 | return NULL; |
715 | } |
716 | |
717 | /* Someone calling slave DMA on a generic channel? */ |
718 | if (slave_id < 0 || (buf_len < period_len)) { |
719 | dev_warn(schan->dev, |
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
721 | __func__, buf_len, period_len, slave_id); |
722 | return NULL; |
723 | } |
724 | |
725 | slave_addr = ops->slave_addr(schan); |
726 | |
727 | /* |
	 * Allocate the sg list dynamically as it would consume too much stack
729 | * space. |
730 | */ |
	sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
732 | if (!sgl) |
733 | return NULL; |
734 | |
735 | sg_init_table(sgl, sg_len); |
736 | |
737 | for (i = 0; i < sg_len; i++) { |
738 | dma_addr_t src = buf_addr + (period_len * i); |
739 | |
		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
742 | sg_dma_address(&sgl[i]) = src; |
743 | sg_dma_len(&sgl[i]) = period_len; |
744 | } |
745 | |
	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);

	kfree(sgl);
750 | return desc; |
751 | } |
752 | |
753 | static int shdma_terminate_all(struct dma_chan *chan) |
754 | { |
755 | struct shdma_chan *schan = to_shdma_chan(chan); |
756 | struct shdma_dev *sdev = to_shdma_dev(chan->device); |
757 | const struct shdma_ops *ops = sdev->ops; |
758 | unsigned long flags; |
759 | |
760 | spin_lock_irqsave(&schan->chan_lock, flags); |
761 | ops->halt_channel(schan); |
762 | |
	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
764 | /* Record partial transfer */ |
765 | struct shdma_desc *desc = list_first_entry(&schan->ld_queue, |
766 | struct shdma_desc, node); |
767 | desc->partial = ops->get_partial(schan, desc); |
768 | } |
769 | |
	spin_unlock_irqrestore(&schan->chan_lock, flags);

	shdma_chan_ld_cleanup(schan, true);
773 | |
774 | return 0; |
775 | } |
776 | |
777 | static int shdma_config(struct dma_chan *chan, |
778 | struct dma_slave_config *config) |
779 | { |
780 | struct shdma_chan *schan = to_shdma_chan(chan); |
781 | |
782 | /* |
783 | * So far only .slave_id is used, but the slave drivers are |
784 | * encouraged to also set a transfer direction and an address. |
785 | */ |
786 | if (!config) |
787 | return -EINVAL; |
788 | |
789 | /* |
790 | * We could lock this, but you shouldn't be configuring the |
	 * channel while using it...
	 */
	return shdma_setup_slave(schan,
				 config->direction == DMA_DEV_TO_MEM ?
				 config->src_addr : config->dst_addr);
796 | } |
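
/*
 * Example (illustrative sketch, not part of this driver): the configuration a
 * client is expected to pass after requesting a channel with
 * shdma_chan_filter(). The FIFO register address is hypothetical; only
 * .direction and the matching address are evaluated by shdma_config().
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= MY_DEVICE_TX_FIFO,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */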
797 | |
798 | static void shdma_issue_pending(struct dma_chan *chan) |
799 | { |
800 | struct shdma_chan *schan = to_shdma_chan(chan); |
801 | |
	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
808 | } |
809 | |
810 | static enum dma_status shdma_tx_status(struct dma_chan *chan, |
811 | dma_cookie_t cookie, |
812 | struct dma_tx_state *txstate) |
813 | { |
814 | struct shdma_chan *schan = to_shdma_chan(chan); |
815 | enum dma_status status; |
816 | unsigned long flags; |
817 | |
	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);
823 | |
824 | /* |
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error
827 | */ |
828 | if (status != DMA_COMPLETE) { |
829 | struct shdma_desc *sdesc; |
830 | status = DMA_ERROR; |
831 | list_for_each_entry(sdesc, &schan->ld_queue, node) |
832 | if (sdesc->cookie == cookie) { |
833 | status = DMA_IN_PROGRESS; |
834 | break; |
835 | } |
836 | } |
837 | |
	spin_unlock_irqrestore(&schan->chan_lock, flags);
839 | |
840 | return status; |
841 | } |
842 | |
843 | /* Called from error IRQ or NMI */ |
844 | bool shdma_reset(struct shdma_dev *sdev) |
845 | { |
846 | const struct shdma_ops *ops = sdev->ops; |
847 | struct shdma_chan *schan; |
848 | unsigned int handled = 0; |
849 | int i; |
850 | |
851 | /* Reset all channels */ |
852 | shdma_for_each_chan(schan, sdev, i) { |
853 | struct shdma_desc *sdesc; |
854 | LIST_HEAD(dl); |
855 | |
856 | if (!schan) |
857 | continue; |
858 | |
		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);
873 | |
874 | /* Complete all */ |
875 | list_for_each_entry(sdesc, &dl, node) { |
876 | struct dma_async_tx_descriptor *tx = &sdesc->async_tx; |
877 | |
878 | sdesc->mark = DESC_IDLE; |
879 | dmaengine_desc_get_callback_invoke(tx, NULL); |
880 | } |
881 | |
		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);
885 | |
886 | handled++; |
887 | } |
888 | |
889 | return !!handled; |
890 | } |
891 | EXPORT_SYMBOL(shdma_reset); |
892 | |
893 | static irqreturn_t chan_irq(int irq, void *dev) |
894 | { |
895 | struct shdma_chan *schan = dev; |
896 | const struct shdma_ops *ops = |
897 | to_shdma_dev(schan->dma_chan.device)->ops; |
898 | irqreturn_t ret; |
899 | |
	spin_lock(&schan->chan_lock);

	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

	spin_unlock(&schan->chan_lock);
905 | |
906 | return ret; |
907 | } |
908 | |
909 | static irqreturn_t chan_irqt(int irq, void *dev) |
910 | { |
911 | struct shdma_chan *schan = dev; |
912 | const struct shdma_ops *ops = |
913 | to_shdma_dev(schan->dma_chan.device)->ops; |
914 | struct shdma_desc *sdesc; |
915 | |
	spin_lock_irq(&schan->chan_lock);
917 | list_for_each_entry(sdesc, &schan->ld_queue, node) { |
918 | if (sdesc->mark == DESC_SUBMITTED && |
919 | ops->desc_completed(schan, sdesc)) { |
			dev_dbg(schan->dev, "done #%d@%p\n",
921 | sdesc->async_tx.cookie, &sdesc->async_tx); |
922 | sdesc->mark = DESC_COMPLETED; |
923 | break; |
924 | } |
925 | } |
926 | /* Next desc */ |
927 | shdma_chan_xfer_ld_queue(schan); |
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);
931 | |
932 | return IRQ_HANDLED; |
933 | } |
934 | |
935 | int shdma_request_irq(struct shdma_chan *schan, int irq, |
936 | unsigned long flags, const char *name) |
937 | { |
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);
940 | |
941 | schan->irq = ret < 0 ? ret : irq; |
942 | |
943 | return ret; |
944 | } |
945 | EXPORT_SYMBOL(shdma_request_irq); |
946 | |
947 | void shdma_chan_probe(struct shdma_dev *sdev, |
948 | struct shdma_chan *schan, int id) |
949 | { |
950 | schan->pm_state = SHDMA_PM_ESTABLISHED; |
951 | |
952 | /* reference struct dma_device */ |
953 | schan->dma_chan.device = &sdev->dma_dev; |
	dma_cookie_init(&schan->dma_chan);
955 | |
956 | schan->dev = sdev->dma_dev.dev; |
957 | schan->id = id; |
958 | |
959 | if (!schan->max_xfer_len) |
960 | schan->max_xfer_len = PAGE_SIZE; |
961 | |
962 | spin_lock_init(&schan->chan_lock); |
963 | |
	/* Init descriptor management lists */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);
967 | |
968 | /* Add the channel to DMA device channel list */ |
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
971 | sdev->schan[id] = schan; |
972 | } |
973 | EXPORT_SYMBOL(shdma_chan_probe); |
974 | |
975 | void shdma_chan_remove(struct shdma_chan *schan) |
976 | { |
	list_del(&schan->dma_chan.device_node);
978 | } |
979 | EXPORT_SYMBOL(shdma_chan_remove); |
980 | |
981 | int shdma_init(struct device *dev, struct shdma_dev *sdev, |
982 | int chan_num) |
983 | { |
984 | struct dma_device *dma_dev = &sdev->dma_dev; |
985 | |
986 | /* |
	 * Require all callbacks for now; they can trivially be made optional
988 | * later as required |
989 | */ |
990 | if (!sdev->ops || |
991 | !sdev->desc_size || |
992 | !sdev->ops->embedded_desc || |
993 | !sdev->ops->start_xfer || |
994 | !sdev->ops->setup_xfer || |
995 | !sdev->ops->set_slave || |
996 | !sdev->ops->desc_setup || |
997 | !sdev->ops->slave_addr || |
998 | !sdev->ops->channel_busy || |
999 | !sdev->ops->halt_channel || |
1000 | !sdev->ops->desc_completed) |
1001 | return -EINVAL; |
1002 | |
	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
1004 | if (!sdev->schan) |
1005 | return -ENOMEM; |
1006 | |
	INIT_LIST_HEAD(&dma_dev->channels);
1008 | |
1009 | /* Common and MEMCPY operations */ |
1010 | dma_dev->device_alloc_chan_resources |
1011 | = shdma_alloc_chan_resources; |
1012 | dma_dev->device_free_chan_resources = shdma_free_chan_resources; |
1013 | dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy; |
1014 | dma_dev->device_tx_status = shdma_tx_status; |
1015 | dma_dev->device_issue_pending = shdma_issue_pending; |
1016 | |
	/* Fields compulsory for DMA_SLAVE */
1018 | dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; |
1019 | dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic; |
1020 | dma_dev->device_config = shdma_config; |
1021 | dma_dev->device_terminate_all = shdma_terminate_all; |
1022 | |
1023 | dma_dev->dev = dev; |
1024 | |
1025 | return 0; |
1026 | } |
1027 | EXPORT_SYMBOL(shdma_init); |
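
/*
 * Example (illustrative sketch, not part of this driver): a hardware glue
 * driver built on top of this library fills in struct shdma_dev (.ops and
 * .desc_size at least), calls shdma_init(), probes its channels and IRQs with
 * the helpers above, and only then registers the dma_device. All my_* names
 * and MY_CHANNEL_COUNT are hypothetical.
 *
 *	err = shdma_init(&pdev->dev, &my_dev->shdma_dev, MY_CHANNEL_COUNT);
 *	if (err < 0)
 *		return err;
 *
 *	for (i = 0; i < MY_CHANNEL_COUNT; i++) {
 *		shdma_chan_probe(&my_dev->shdma_dev, &my_chan[i].shdma_chan, i);
 *		err = shdma_request_irq(&my_chan[i].shdma_chan, my_irq[i],
 *					IRQF_SHARED, "my-dmac");
 *		if (err < 0)
 *			goto error;
 *	}
 *
 *	err = dma_async_device_register(&my_dev->shdma_dev.dma_dev);
 */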
1028 | |
1029 | void shdma_cleanup(struct shdma_dev *sdev) |
1030 | { |
	kfree(sdev->schan);
1032 | } |
1033 | EXPORT_SYMBOL(shdma_cleanup); |
1034 | |
1035 | static int __init shdma_enter(void) |
1036 | { |
	shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL);
1038 | if (!shdma_slave_used) |
1039 | return -ENOMEM; |
1040 | return 0; |
1041 | } |
1042 | module_init(shdma_enter); |
1043 | |
1044 | static void __exit shdma_exit(void) |
1045 | { |
	bitmap_free(shdma_slave_used);
1047 | } |
1048 | module_exit(shdma_exit); |
1049 | |
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
1052 | |