// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; the list is
 * protected by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel(). Once a channel is allocated, a reference is taken
 * against its corresponding driver to prevent removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */
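
/*
 * Illustrative sketch (not part of the upstream file) of the two usage
 * models described above; the "rx" name and the pdev pointer are
 * hypothetical:
 *
 *	// opportunistic memcpy offload via the public channel table
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		... issue descriptors on chan ...
 *	dmaengine_put();
 *
 *	// exclusive slave channel, typically from a peripheral driver
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (!IS_ERR(chan)) {
 *		... configure and use chan ...
 *		dma_release_channel(chan);
 *	}
 */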
31 | |
32 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
33 | |
34 | #include <linux/platform_device.h> |
35 | #include <linux/dma-mapping.h> |
36 | #include <linux/init.h> |
37 | #include <linux/module.h> |
38 | #include <linux/mm.h> |
39 | #include <linux/device.h> |
40 | #include <linux/dmaengine.h> |
41 | #include <linux/hardirq.h> |
42 | #include <linux/spinlock.h> |
43 | #include <linux/percpu.h> |
44 | #include <linux/rcupdate.h> |
45 | #include <linux/mutex.h> |
46 | #include <linux/jiffies.h> |
47 | #include <linux/rculist.h> |
48 | #include <linux/idr.h> |
49 | #include <linux/slab.h> |
50 | #include <linux/acpi.h> |
51 | #include <linux/acpi_dma.h> |
52 | #include <linux/of_dma.h> |
53 | #include <linux/mempool.h> |
54 | #include <linux/numa.h> |
55 | |
56 | #include "dmaengine.h" |
57 | |
58 | static DEFINE_MUTEX(dma_list_mutex); |
59 | static DEFINE_IDA(dma_ida); |
60 | static LIST_HEAD(dma_device_list); |
61 | static long dmaengine_ref_count; |
62 | |
/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;

static void dmaengine_debug_register(struct dma_device *dma_dev)
{
	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
						   rootdir);
	if (IS_ERR(dma_dev->dbg_dev_root))
		dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{
	debugfs_remove_recursive(dma_dev->dbg_dev_root);
	dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_dbg_summary_show(struct seq_file *s,
				       struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
				   chan->dbg_client_name ?: "in-use");

			if (chan->router)
				seq_printf(s, " (via router: %s)\n",
					   dev_name(chan->router->dev));
			else
				seq_puts(s, "\n");
		}
	}
}

static int dmaengine_summary_show(struct seq_file *s, void *data)
{
	struct dma_device *dma_dev = NULL;

	mutex_lock(&dma_list_mutex);
	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
		seq_printf(s, "dma%d (%s): number of channels: %u\n",
			   dma_dev->dev_id, dev_name(dma_dev->dev),
			   dma_dev->chancnt);

		if (dma_dev->dbg_summary_show)
			dma_dev->dbg_summary_show(s, dma_dev);
		else
			dmaengine_dbg_summary_show(s, dma_dev);

		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
			seq_puts(s, "\n");
	}
	mutex_unlock(&dma_list_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);

static void __init dmaengine_debugfs_init(void)
{
	rootdir = debugfs_create_dir("dmaengine", NULL);

	/* /sys/kernel/debug/dmaengine/summary */
	debugfs_create_file("summary", 0444, rootdir, NULL,
			    &dmaengine_summary_fops);
}
#else
static inline void dmaengine_debugfs_init(void) { }
static inline void dmaengine_debug_register(struct dma_device *dma_dev) { }

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif /* CONFIG_DEBUG_FS */
143 | |
144 | /* --- sysfs implementation --- */ |
145 | |
146 | #define DMA_SLAVE_NAME "slave" |
147 | |
148 | /** |
149 | * dev_to_dma_chan - convert a device pointer to its sysfs container object |
150 | * @dev: device node |
151 | * |
152 | * Must be called under dma_list_mutex. |
153 | */ |
154 | static struct dma_chan *dev_to_dma_chan(struct device *dev) |
155 | { |
156 | struct dma_chan_dev *chan_dev; |
157 | |
158 | chan_dev = container_of(dev, typeof(*chan_dev), device); |
159 | return chan_dev->chan; |
160 | } |
161 | |
162 | static ssize_t memcpy_count_show(struct device *dev, |
163 | struct device_attribute *attr, char *buf) |
164 | { |
165 | struct dma_chan *chan; |
166 | unsigned long count = 0; |
167 | int i; |
168 | int err; |
169 | |
170 | mutex_lock(&dma_list_mutex); |
171 | chan = dev_to_dma_chan(dev); |
172 | if (chan) { |
173 | for_each_possible_cpu(i) |
174 | count += per_cpu_ptr(chan->local, i)->memcpy_count; |
175 | err = sysfs_emit(buf, fmt: "%lu\n" , count); |
176 | } else |
177 | err = -ENODEV; |
178 | mutex_unlock(lock: &dma_list_mutex); |
179 | |
180 | return err; |
181 | } |
182 | static DEVICE_ATTR_RO(memcpy_count); |
183 | |
184 | static ssize_t bytes_transferred_show(struct device *dev, |
185 | struct device_attribute *attr, char *buf) |
186 | { |
187 | struct dma_chan *chan; |
188 | unsigned long count = 0; |
189 | int i; |
190 | int err; |
191 | |
192 | mutex_lock(&dma_list_mutex); |
193 | chan = dev_to_dma_chan(dev); |
194 | if (chan) { |
195 | for_each_possible_cpu(i) |
196 | count += per_cpu_ptr(chan->local, i)->bytes_transferred; |
197 | err = sysfs_emit(buf, fmt: "%lu\n" , count); |
198 | } else |
199 | err = -ENODEV; |
200 | mutex_unlock(lock: &dma_list_mutex); |
201 | |
202 | return err; |
203 | } |
204 | static DEVICE_ATTR_RO(bytes_transferred); |
205 | |
206 | static ssize_t in_use_show(struct device *dev, struct device_attribute *attr, |
207 | char *buf) |
208 | { |
209 | struct dma_chan *chan; |
210 | int err; |
211 | |
212 | mutex_lock(&dma_list_mutex); |
213 | chan = dev_to_dma_chan(dev); |
214 | if (chan) |
215 | err = sysfs_emit(buf, fmt: "%d\n" , chan->client_count); |
216 | else |
217 | err = -ENODEV; |
218 | mutex_unlock(lock: &dma_list_mutex); |
219 | |
220 | return err; |
221 | } |
222 | static DEVICE_ATTR_RO(in_use); |
223 | |
224 | static struct attribute *dma_dev_attrs[] = { |
225 | &dev_attr_memcpy_count.attr, |
226 | &dev_attr_bytes_transferred.attr, |
227 | &dev_attr_in_use.attr, |
228 | NULL, |
229 | }; |
230 | ATTRIBUTE_GROUPS(dma_dev); |
231 | |
static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name = "dma",
	.dev_groups = dma_dev_groups,
	.dev_release = chan_dev_release,
};
245 | |
246 | /* --- client and device registration --- */ |
247 | |
248 | /* enable iteration over all operation types */ |
249 | static dma_cap_mask_t dma_cap_mask_all; |
250 | |
251 | /** |
252 | * struct dma_chan_tbl_ent - tracks channel allocations per core/operation |
253 | * @chan: associated channel for this entry |
254 | */ |
255 | struct dma_chan_tbl_ent { |
256 | struct dma_chan *chan; |
257 | }; |
258 | |
259 | /* percpu lookup table for memory-to-memory offload providers */ |
260 | static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; |
261 | |
static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);
294 | |
/**
 * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
 * @chan: DMA channel to test
 * @cpu: CPU index which the channel should be close to
 *
 * Returns true if the channel is in the same NUMA-node as the CPU.
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == NUMA_NO_NODE ||
		cpumask_test_cpu(cpu, cpumask_of_node(node));
}
308 | |
309 | /** |
310 | * min_chan - finds the channel with min count and in the same NUMA-node as the CPU |
311 | * @cap: capability to match |
312 | * @cpu: CPU index which the channel should be close to |
313 | * |
314 | * If some channels are close to the given CPU, the one with the lowest |
315 | * reference count is returned. Otherwise, CPU is ignored and only the |
316 | * reference count is taken into account. |
317 | * |
318 | * Must be called under dma_list_mutex. |
319 | */ |
320 | static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) |
321 | { |
322 | struct dma_device *device; |
323 | struct dma_chan *chan; |
324 | struct dma_chan *min = NULL; |
325 | struct dma_chan *localmin = NULL; |
326 | |
327 | list_for_each_entry(device, &dma_device_list, global_node) { |
328 | if (!dma_has_cap(cap, device->cap_mask) || |
329 | dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
330 | continue; |
331 | list_for_each_entry(chan, &device->channels, device_node) { |
332 | if (!chan->client_count) |
333 | continue; |
334 | if (!min || chan->table_count < min->table_count) |
335 | min = chan; |
336 | |
337 | if (dma_chan_is_local(chan, cpu)) |
338 | if (!localmin || |
339 | chan->table_count < localmin->table_count) |
340 | localmin = chan; |
341 | } |
342 | } |
343 | |
344 | chan = localmin ? localmin : min; |
345 | |
346 | if (chan) |
347 | chan->table_count++; |
348 | |
349 | return chan; |
350 | } |
351 | |
352 | /** |
353 | * dma_channel_rebalance - redistribute the available channels |
354 | * |
355 | * Optimize for CPU isolation (each CPU gets a dedicated channel for an |
356 | * operation type) in the SMP case, and operation isolation (avoid |
357 | * multi-tasking channels) in the non-SMP case. |
358 | * |
359 | * Must be called under dma_list_mutex. |
360 | */ |
361 | static void dma_channel_rebalance(void) |
362 | { |
363 | struct dma_chan *chan; |
364 | struct dma_device *device; |
365 | int cpu; |
366 | int cap; |
367 | |
368 | /* undo the last distribution */ |
369 | for_each_dma_cap_mask(cap, dma_cap_mask_all) |
370 | for_each_possible_cpu(cpu) |
371 | per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; |
372 | |
373 | list_for_each_entry(device, &dma_device_list, global_node) { |
374 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
375 | continue; |
376 | list_for_each_entry(chan, &device->channels, device_node) |
377 | chan->table_count = 0; |
378 | } |
379 | |
380 | /* don't populate the channel_table if no clients are available */ |
381 | if (!dmaengine_ref_count) |
382 | return; |
383 | |
384 | /* redistribute available channels */ |
385 | for_each_dma_cap_mask(cap, dma_cap_mask_all) |
386 | for_each_online_cpu(cpu) { |
387 | chan = min_chan(cap, cpu); |
388 | per_cpu_ptr(channel_table[cap], cpu)->chan = chan; |
389 | } |
390 | } |
391 | |
static int dma_device_satisfies_mask(struct dma_device *device,
				     const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
401 | |
402 | static struct module *dma_chan_to_owner(struct dma_chan *chan) |
403 | { |
404 | return chan->device->owner; |
405 | } |
406 | |
407 | /** |
408 | * balance_ref_count - catch up the channel reference count |
409 | * @chan: channel to balance ->client_count versus dmaengine_ref_count |
410 | * |
411 | * Must be called under dma_list_mutex. |
412 | */ |
413 | static void balance_ref_count(struct dma_chan *chan) |
414 | { |
415 | struct module *owner = dma_chan_to_owner(chan); |
416 | |
417 | while (chan->client_count < dmaengine_ref_count) { |
418 | __module_get(module: owner); |
419 | chan->client_count++; |
420 | } |
421 | } |
422 | |
static void dma_device_release(struct kref *ref)
{
	struct dma_device *device = container_of(ref, struct dma_device, ref);

	list_del_rcu(&device->global_node);
	dma_channel_rebalance();

	if (device->device_release)
		device->device_release(device);
}

static void dma_device_put(struct dma_device *device)
{
	lockdep_assert_held(&dma_list_mutex);
	kref_put(&device->ref, dma_device_release);
}
439 | |
440 | /** |
441 | * dma_chan_get - try to grab a DMA channel's parent driver module |
442 | * @chan: channel to grab |
443 | * |
444 | * Must be called under dma_list_mutex. |
445 | */ |
446 | static int dma_chan_get(struct dma_chan *chan) |
447 | { |
448 | struct module *owner = dma_chan_to_owner(chan); |
449 | int ret; |
450 | |
451 | /* The channel is already in use, update client count */ |
452 | if (chan->client_count) { |
453 | __module_get(module: owner); |
454 | chan->client_count++; |
455 | return 0; |
456 | } |
457 | |
458 | if (!try_module_get(module: owner)) |
459 | return -ENODEV; |
460 | |
461 | ret = kref_get_unless_zero(kref: &chan->device->ref); |
462 | if (!ret) { |
463 | ret = -ENODEV; |
464 | goto module_put_out; |
465 | } |
466 | |
467 | /* allocate upon first client reference */ |
468 | if (chan->device->device_alloc_chan_resources) { |
469 | ret = chan->device->device_alloc_chan_resources(chan); |
470 | if (ret < 0) |
471 | goto err_out; |
472 | } |
473 | |
474 | chan->client_count++; |
475 | |
476 | if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) |
477 | balance_ref_count(chan); |
478 | |
479 | return 0; |
480 | |
481 | err_out: |
482 | dma_device_put(device: chan->device); |
483 | module_put_out: |
484 | module_put(module: owner); |
485 | return ret; |
486 | } |
487 | |
488 | /** |
489 | * dma_chan_put - drop a reference to a DMA channel's parent driver module |
490 | * @chan: channel to release |
491 | * |
492 | * Must be called under dma_list_mutex. |
493 | */ |
494 | static void dma_chan_put(struct dma_chan *chan) |
495 | { |
496 | /* This channel is not in use, bail out */ |
497 | if (!chan->client_count) |
498 | return; |
499 | |
500 | chan->client_count--; |
501 | |
502 | /* This channel is not in use anymore, free it */ |
503 | if (!chan->client_count && chan->device->device_free_chan_resources) { |
504 | /* Make sure all operations have completed */ |
505 | dmaengine_synchronize(chan); |
506 | chan->device->device_free_chan_resources(chan); |
507 | } |
508 | |
509 | /* If the channel is used via a DMA request router, free the mapping */ |
510 | if (chan->router && chan->router->route_free) { |
511 | chan->router->route_free(chan->router->dev, chan->route_data); |
512 | chan->router = NULL; |
513 | chan->route_data = NULL; |
514 | } |
515 | |
516 | dma_device_put(device: chan->device); |
517 | module_put(module: dma_chan_to_owner(chan)); |
518 | } |
519 | |
520 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) |
521 | { |
522 | enum dma_status status; |
523 | unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(m: 5000); |
524 | |
525 | dma_async_issue_pending(chan); |
526 | do { |
527 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
528 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
529 | dev_err(chan->device->dev, "%s: timeout!\n" , __func__); |
530 | return DMA_ERROR; |
531 | } |
532 | if (status != DMA_IN_PROGRESS) |
533 | break; |
534 | cpu_relax(); |
535 | } while (1); |
536 | |
537 | return status; |
538 | } |
539 | EXPORT_SYMBOL(dma_sync_wait); |
540 | |
541 | /** |
542 | * dma_find_channel - find a channel to carry out the operation |
543 | * @tx_type: transaction type |
544 | */ |
545 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) |
546 | { |
547 | return this_cpu_read(channel_table[tx_type]->chan); |
548 | } |
549 | EXPORT_SYMBOL(dma_find_channel); |
550 | |
551 | /** |
552 | * dma_issue_pending_all - flush all pending operations across all channels |
553 | */ |
554 | void dma_issue_pending_all(void) |
555 | { |
556 | struct dma_device *device; |
557 | struct dma_chan *chan; |
558 | |
559 | rcu_read_lock(); |
560 | list_for_each_entry_rcu(device, &dma_device_list, global_node) { |
561 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
562 | continue; |
563 | list_for_each_entry(chan, &device->channels, device_node) |
564 | if (chan->client_count) |
565 | device->device_issue_pending(chan); |
566 | } |
567 | rcu_read_unlock(); |
568 | } |
569 | EXPORT_SYMBOL(dma_issue_pending_all); |
570 | |
571 | int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) |
572 | { |
573 | struct dma_device *device; |
574 | |
575 | if (!chan || !caps) |
576 | return -EINVAL; |
577 | |
578 | device = chan->device; |
579 | |
580 | /* check if the channel supports slave transactions */ |
581 | if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || |
582 | test_bit(DMA_CYCLIC, device->cap_mask.bits))) |
583 | return -ENXIO; |
584 | |
585 | /* |
586 | * Check whether it reports it uses the generic slave |
587 | * capabilities, if not, that means it doesn't support any |
588 | * kind of slave capabilities reporting. |
589 | */ |
590 | if (!device->directions) |
591 | return -ENXIO; |
592 | |
593 | caps->src_addr_widths = device->src_addr_widths; |
594 | caps->dst_addr_widths = device->dst_addr_widths; |
595 | caps->directions = device->directions; |
596 | caps->min_burst = device->min_burst; |
597 | caps->max_burst = device->max_burst; |
598 | caps->max_sg_burst = device->max_sg_burst; |
599 | caps->residue_granularity = device->residue_granularity; |
600 | caps->descriptor_reuse = device->descriptor_reuse; |
601 | caps->cmd_pause = !!device->device_pause; |
602 | caps->cmd_resume = !!device->device_resume; |
603 | caps->cmd_terminate = !!device->device_terminate_all; |
604 | |
605 | /* |
606 | * DMA engine device might be configured with non-uniformly |
607 | * distributed slave capabilities per device channels. In this |
608 | * case the corresponding driver may provide the device_caps |
609 | * callback to override the generic capabilities with |
610 | * channel-specific ones. |
611 | */ |
612 | if (device->device_caps) |
613 | device->device_caps(chan, caps); |
614 | |
615 | return 0; |
616 | } |
617 | EXPORT_SYMBOL_GPL(dma_get_slave_caps); |
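
/*
 * Illustrative sketch (not part of the upstream file): a client checking
 * channel capabilities before configuring a transfer; the width and
 * direction chosen here are example values only:
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.directions & BIT(DMA_DEV_TO_MEM)) &&
 *	    (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		... the channel can do 32-bit dev-to-mem transfers ...
 */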
618 | |
619 | static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, |
620 | struct dma_device *dev, |
621 | dma_filter_fn fn, void *fn_param) |
622 | { |
623 | struct dma_chan *chan; |
624 | |
625 | if (mask && !dma_device_satisfies_mask(device: dev, want: mask)) { |
626 | dev_dbg(dev->dev, "%s: wrong capabilities\n" , __func__); |
627 | return NULL; |
628 | } |
629 | /* devices with multiple channels need special handling as we need to |
630 | * ensure that all channels are either private or public. |
631 | */ |
632 | if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) |
633 | list_for_each_entry(chan, &dev->channels, device_node) { |
634 | /* some channels are already publicly allocated */ |
635 | if (chan->client_count) |
636 | return NULL; |
637 | } |
638 | |
639 | list_for_each_entry(chan, &dev->channels, device_node) { |
640 | if (chan->client_count) { |
641 | dev_dbg(dev->dev, "%s: %s busy\n" , |
642 | __func__, dma_chan_name(chan)); |
643 | continue; |
644 | } |
645 | if (fn && !fn(chan, fn_param)) { |
646 | dev_dbg(dev->dev, "%s: %s filter said false\n" , |
647 | __func__, dma_chan_name(chan)); |
648 | continue; |
649 | } |
650 | return chan; |
651 | } |
652 | |
653 | return NULL; |
654 | } |
655 | |
656 | static struct dma_chan *find_candidate(struct dma_device *device, |
657 | const dma_cap_mask_t *mask, |
658 | dma_filter_fn fn, void *fn_param) |
659 | { |
660 | struct dma_chan *chan = private_candidate(mask, dev: device, fn, fn_param); |
661 | int err; |
662 | |
663 | if (chan) { |
664 | /* Found a suitable channel, try to grab, prep, and return it. |
665 | * We first set DMA_PRIVATE to disable balance_ref_count as this |
666 | * channel will not be published in the general-purpose |
667 | * allocator |
668 | */ |
669 | dma_cap_set(DMA_PRIVATE, device->cap_mask); |
670 | device->privatecnt++; |
671 | err = dma_chan_get(chan); |
672 | |
673 | if (err) { |
674 | if (err == -ENODEV) { |
675 | dev_dbg(device->dev, "%s: %s module removed\n" , |
676 | __func__, dma_chan_name(chan)); |
677 | list_del_rcu(entry: &device->global_node); |
678 | } else |
679 | dev_dbg(device->dev, |
680 | "%s: failed to get %s: (%d)\n" , |
681 | __func__, dma_chan_name(chan), err); |
682 | |
683 | if (--device->privatecnt == 0) |
684 | dma_cap_clear(DMA_PRIVATE, device->cap_mask); |
685 | |
686 | chan = ERR_PTR(error: err); |
687 | } |
688 | } |
689 | |
690 | return chan ? chan : ERR_PTR(error: -EPROBE_DEFER); |
691 | } |
692 | |
/**
 * dma_get_slave_channel - try to get a specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;
		int err;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
725 | EXPORT_SYMBOL_GPL(dma_get_slave_channel); |
726 | |
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
745 | |
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to filter the available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn()
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
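
/*
 * Illustrative sketch (not part of the upstream file) of requesting an
 * exclusive memcpy channel with a filter callback; my_filter() and the
 * my_controller_dev pointer are hypothetical:
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param; // pick one specific controller
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = __dma_request_channel(&mask, my_filter, my_controller_dev, NULL);
 */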
785 | |
786 | static const struct dma_slave_map *dma_filter_match(struct dma_device *device, |
787 | const char *name, |
788 | struct device *dev) |
789 | { |
790 | int i; |
791 | |
792 | if (!device->filter.mapcnt) |
793 | return NULL; |
794 | |
795 | for (i = 0; i < device->filter.mapcnt; i++) { |
796 | const struct dma_slave_map *map = &device->filter.map[i]; |
797 | |
798 | if (!strcmp(map->devname, dev_name(dev)) && |
799 | !strcmp(map->slave, name)) |
800 | return map; |
801 | } |
802 | |
803 | return NULL; |
804 | } |
805 | |
/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (!IS_ERR_OR_NULL(chan))
		goto found;

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	if (IS_ERR(chan))
		return chan;
	if (!chan)
		return ERR_PTR(-EPROBE_DEFER);

found:
#ifdef CONFIG_DEBUG_FS
	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
					  name);
#endif

	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
	if (!chan->name)
		return chan;
	chan->slave = dev;

	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
			      DMA_SLAVE_NAME))
		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
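
/*
 * Illustrative sketch (not part of the upstream file) of the legacy
 * filter-map path that dma_request_chan() falls back to when neither DT
 * nor ACPI provides the mapping; the device names, request-line values
 * and filter function set up by the DMA controller driver here are
 * hypothetical:
 *
 *	static const struct dma_slave_map my_slave_map[] = {
 *		{ "my-uart.0", "tx", (void *)MY_UART0_TX_REQ },
 *		{ "my-uart.0", "rx", (void *)MY_UART0_RX_REQ },
 *	};
 *
 *	dma_dev->filter.map = my_slave_map;
 *	dma_dev->filter.mapcnt = ARRAY_SIZE(my_slave_map);
 *	dma_dev->filter.fn = my_filter_fn;
 */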
875 | |
/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
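
/*
 * Illustrative sketch: requesting any channel that can do slave transfers,
 * without naming a specific controller (error handling abbreviated):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */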
902 | |
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);

	if (chan->slave) {
		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
		sysfs_remove_link(&chan->slave->kobj, chan->name);
		kfree(chan->name);
		chan->name = NULL;
		chan->slave = NULL;
	}

#ifdef CONFIG_DEBUG_FS
	kfree(chan->dbg_client_name);
	chan->dbg_client_name = NULL;
#endif
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
928 | |
929 | /** |
930 | * dmaengine_get - register interest in dma_channels |
931 | */ |
932 | void dmaengine_get(void) |
933 | { |
934 | struct dma_device *device, *_d; |
935 | struct dma_chan *chan; |
936 | int err; |
937 | |
938 | mutex_lock(&dma_list_mutex); |
939 | dmaengine_ref_count++; |
940 | |
941 | /* try to grab channels */ |
942 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { |
943 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
944 | continue; |
945 | list_for_each_entry(chan, &device->channels, device_node) { |
946 | err = dma_chan_get(chan); |
947 | if (err == -ENODEV) { |
948 | /* module removed before we could use it */ |
949 | list_del_rcu(entry: &device->global_node); |
950 | break; |
951 | } else if (err) |
952 | dev_dbg(chan->device->dev, |
953 | "%s: failed to get %s: (%d)\n" , |
954 | __func__, dma_chan_name(chan), err); |
955 | } |
956 | } |
957 | |
958 | /* if this is the first reference and there were channels |
959 | * waiting we need to rebalance to get those channels |
960 | * incorporated into the channel table |
961 | */ |
962 | if (dmaengine_ref_count == 1) |
963 | dma_channel_rebalance(); |
964 | mutex_unlock(lock: &dma_list_mutex); |
965 | } |
966 | EXPORT_SYMBOL(dmaengine_get); |
967 | |
968 | /** |
969 | * dmaengine_put - let DMA drivers be removed when ref_count == 0 |
970 | */ |
971 | void dmaengine_put(void) |
972 | { |
973 | struct dma_device *device, *_d; |
974 | struct dma_chan *chan; |
975 | |
976 | mutex_lock(&dma_list_mutex); |
977 | dmaengine_ref_count--; |
978 | BUG_ON(dmaengine_ref_count < 0); |
979 | /* drop channel references */ |
980 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { |
981 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
982 | continue; |
983 | list_for_each_entry(chan, &device->channels, device_node) |
984 | dma_chan_put(chan); |
985 | } |
986 | mutex_unlock(lock: &dma_list_mutex); |
987 | } |
988 | EXPORT_SYMBOL(dmaengine_put); |
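
/*
 * Illustrative sketch: an opportunistic offload user (e.g. an async_tx
 * consumer) typically pairs these calls with its module lifetime, so that
 * public channels stay provisioned while the module is loaded:
 *
 *	static int __init my_init(void)
 *	{
 *		dmaengine_get();
 *		return 0;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		dmaengine_put();
 *	}
 */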
989 | |
990 | static bool device_has_all_tx_types(struct dma_device *device) |
991 | { |
992 | /* A device that satisfies this test has channels that will never cause |
993 | * an async_tx channel switch event as all possible operation types can |
994 | * be handled. |
995 | */ |
996 | #ifdef CONFIG_ASYNC_TX_DMA |
997 | if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) |
998 | return false; |
999 | #endif |
1000 | |
1001 | #if IS_ENABLED(CONFIG_ASYNC_MEMCPY) |
1002 | if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) |
1003 | return false; |
1004 | #endif |
1005 | |
1006 | #if IS_ENABLED(CONFIG_ASYNC_XOR) |
1007 | if (!dma_has_cap(DMA_XOR, device->cap_mask)) |
1008 | return false; |
1009 | |
1010 | #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA |
1011 | if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) |
1012 | return false; |
1013 | #endif |
1014 | #endif |
1015 | |
1016 | #if IS_ENABLED(CONFIG_ASYNC_PQ) |
1017 | if (!dma_has_cap(DMA_PQ, device->cap_mask)) |
1018 | return false; |
1019 | |
1020 | #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA |
1021 | if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) |
1022 | return false; |
1023 | #endif |
1024 | #endif |
1025 | |
1026 | return true; |
1027 | } |
1028 | |
1029 | static int get_dma_id(struct dma_device *device) |
1030 | { |
1031 | int rc = ida_alloc(ida: &dma_ida, GFP_KERNEL); |
1032 | |
1033 | if (rc < 0) |
1034 | return rc; |
1035 | device->dev_id = rc; |
1036 | return 0; |
1037 | } |
1038 | |
1039 | static int __dma_async_device_channel_register(struct dma_device *device, |
1040 | struct dma_chan *chan) |
1041 | { |
1042 | int rc; |
1043 | |
1044 | chan->local = alloc_percpu(typeof(*chan->local)); |
1045 | if (!chan->local) |
1046 | return -ENOMEM; |
1047 | chan->dev = kzalloc(size: sizeof(*chan->dev), GFP_KERNEL); |
1048 | if (!chan->dev) { |
1049 | rc = -ENOMEM; |
1050 | goto err_free_local; |
1051 | } |
1052 | |
1053 | /* |
1054 | * When the chan_id is a negative value, we are dynamically adding |
1055 | * the channel. Otherwise we are static enumerating. |
1056 | */ |
1057 | chan->chan_id = ida_alloc(ida: &device->chan_ida, GFP_KERNEL); |
1058 | if (chan->chan_id < 0) { |
1059 | pr_err("%s: unable to alloc ida for chan: %d\n" , |
1060 | __func__, chan->chan_id); |
1061 | rc = chan->chan_id; |
1062 | goto err_free_dev; |
1063 | } |
1064 | |
1065 | chan->dev->device.class = &dma_devclass; |
1066 | chan->dev->device.parent = device->dev; |
1067 | chan->dev->chan = chan; |
1068 | chan->dev->dev_id = device->dev_id; |
1069 | dev_set_name(dev: &chan->dev->device, name: "dma%dchan%d" , |
1070 | device->dev_id, chan->chan_id); |
1071 | rc = device_register(dev: &chan->dev->device); |
1072 | if (rc) |
1073 | goto err_out_ida; |
1074 | chan->client_count = 0; |
1075 | device->chancnt++; |
1076 | |
1077 | return 0; |
1078 | |
1079 | err_out_ida: |
1080 | ida_free(&device->chan_ida, id: chan->chan_id); |
1081 | err_free_dev: |
1082 | kfree(objp: chan->dev); |
1083 | err_free_local: |
1084 | free_percpu(pdata: chan->local); |
1085 | chan->local = NULL; |
1086 | return rc; |
1087 | } |
1088 | |
1089 | int dma_async_device_channel_register(struct dma_device *device, |
1090 | struct dma_chan *chan) |
1091 | { |
1092 | int rc; |
1093 | |
1094 | rc = __dma_async_device_channel_register(device, chan); |
1095 | if (rc < 0) |
1096 | return rc; |
1097 | |
1098 | dma_channel_rebalance(); |
1099 | return 0; |
1100 | } |
1101 | EXPORT_SYMBOL_GPL(dma_async_device_channel_register); |
1102 | |
1103 | static void __dma_async_device_channel_unregister(struct dma_device *device, |
1104 | struct dma_chan *chan) |
1105 | { |
1106 | WARN_ONCE(!device->device_release && chan->client_count, |
1107 | "%s called while %d clients hold a reference\n" , |
1108 | __func__, chan->client_count); |
1109 | mutex_lock(&dma_list_mutex); |
1110 | device->chancnt--; |
1111 | chan->dev->chan = NULL; |
1112 | mutex_unlock(lock: &dma_list_mutex); |
1113 | ida_free(&device->chan_ida, id: chan->chan_id); |
1114 | device_unregister(dev: &chan->dev->device); |
1115 | free_percpu(pdata: chan->local); |
1116 | } |
1117 | |
1118 | void dma_async_device_channel_unregister(struct dma_device *device, |
1119 | struct dma_chan *chan) |
1120 | { |
1121 | __dma_async_device_channel_unregister(device, chan); |
1122 | dma_channel_rebalance(); |
1123 | } |
1124 | EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister); |
1125 | |
1126 | /** |
1127 | * dma_async_device_register - registers DMA devices found |
1128 | * @device: pointer to &struct dma_device |
1129 | * |
1130 | * After calling this routine the structure should not be freed except in the |
1131 | * device_release() callback which will be called after |
1132 | * dma_async_device_unregister() is called and no further references are taken. |
1133 | */ |
1134 | int dma_async_device_register(struct dma_device *device) |
1135 | { |
1136 | int rc; |
1137 | struct dma_chan* chan; |
1138 | |
1139 | if (!device) |
1140 | return -ENODEV; |
1141 | |
1142 | /* validate device routines */ |
1143 | if (!device->dev) { |
		pr_err("DMA device must have dev\n");
1145 | return -EIO; |
1146 | } |
1147 | |
1148 | device->owner = device->dev->driver->owner; |
1149 | |
1150 | #define CHECK_CAP(_name, _type) \ |
1151 | { \ |
1152 | if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) { \ |
1153 | dev_err(device->dev, \ |
1154 | "Device claims capability %s, but op is not defined\n", \ |
1155 | __stringify(_type)); \ |
1156 | return -EIO; \ |
1157 | } \ |
1158 | } |
1159 | |
1160 | CHECK_CAP(dma_memcpy, DMA_MEMCPY); |
1161 | CHECK_CAP(dma_xor, DMA_XOR); |
1162 | CHECK_CAP(dma_xor_val, DMA_XOR_VAL); |
1163 | CHECK_CAP(dma_pq, DMA_PQ); |
1164 | CHECK_CAP(dma_pq_val, DMA_PQ_VAL); |
1165 | CHECK_CAP(dma_memset, DMA_MEMSET); |
1166 | CHECK_CAP(dma_interrupt, DMA_INTERRUPT); |
1167 | CHECK_CAP(dma_cyclic, DMA_CYCLIC); |
1168 | CHECK_CAP(interleaved_dma, DMA_INTERLEAVE); |
1169 | |
1170 | #undef CHECK_CAP |
1171 | |
1172 | if (!device->device_tx_status) { |
1173 | dev_err(device->dev, "Device tx_status is not defined\n" ); |
1174 | return -EIO; |
1175 | } |
1176 | |
1177 | |
1178 | if (!device->device_issue_pending) { |
1179 | dev_err(device->dev, "Device issue_pending is not defined\n" ); |
1180 | return -EIO; |
1181 | } |
1182 | |
1183 | if (!device->device_release) |
1184 | dev_dbg(device->dev, |
1185 | "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n" ); |
1186 | |
1187 | kref_init(kref: &device->ref); |
1188 | |
1189 | /* note: this only matters in the |
1190 | * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case |
1191 | */ |
1192 | if (device_has_all_tx_types(device)) |
1193 | dma_cap_set(DMA_ASYNC_TX, device->cap_mask); |
1194 | |
1195 | rc = get_dma_id(device); |
1196 | if (rc != 0) |
1197 | return rc; |
1198 | |
1199 | ida_init(ida: &device->chan_ida); |
1200 | |
1201 | /* represent channels in sysfs. Probably want devs too */ |
1202 | list_for_each_entry(chan, &device->channels, device_node) { |
1203 | rc = __dma_async_device_channel_register(device, chan); |
1204 | if (rc < 0) |
1205 | goto err_out; |
1206 | } |
1207 | |
1208 | mutex_lock(&dma_list_mutex); |
1209 | /* take references on public channels */ |
1210 | if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
1211 | list_for_each_entry(chan, &device->channels, device_node) { |
1212 | /* if clients are already waiting for channels we need |
1213 | * to take references on their behalf |
1214 | */ |
1215 | if (dma_chan_get(chan) == -ENODEV) { |
1216 | /* note we can only get here for the first |
1217 | * channel as the remaining channels are |
1218 | * guaranteed to get a reference |
1219 | */ |
1220 | rc = -ENODEV; |
1221 | mutex_unlock(lock: &dma_list_mutex); |
1222 | goto err_out; |
1223 | } |
1224 | } |
1225 | list_add_tail_rcu(new: &device->global_node, head: &dma_device_list); |
1226 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
1227 | device->privatecnt++; /* Always private */ |
1228 | dma_channel_rebalance(); |
1229 | mutex_unlock(lock: &dma_list_mutex); |
1230 | |
1231 | dmaengine_debug_register(dma_dev: device); |
1232 | |
1233 | return 0; |
1234 | |
1235 | err_out: |
1236 | /* if we never registered a channel just release the idr */ |
1237 | if (!device->chancnt) { |
1238 | ida_free(&dma_ida, id: device->dev_id); |
1239 | return rc; |
1240 | } |
1241 | |
1242 | list_for_each_entry(chan, &device->channels, device_node) { |
1243 | if (chan->local == NULL) |
1244 | continue; |
1245 | mutex_lock(&dma_list_mutex); |
1246 | chan->dev->chan = NULL; |
1247 | mutex_unlock(lock: &dma_list_mutex); |
1248 | device_unregister(dev: &chan->dev->device); |
1249 | free_percpu(pdata: chan->local); |
1250 | } |
1251 | return rc; |
1252 | } |
1253 | EXPORT_SYMBOL(dma_async_device_register); |
1254 | |
1255 | /** |
1256 | * dma_async_device_unregister - unregister a DMA device |
1257 | * @device: pointer to &struct dma_device |
1258 | * |
1259 | * This routine is called by dma driver exit routines, dmaengine holds module |
1260 | * references to prevent it being called while channels are in use. |
1261 | */ |
1262 | void dma_async_device_unregister(struct dma_device *device) |
1263 | { |
1264 | struct dma_chan *chan, *n; |
1265 | |
1266 | dmaengine_debug_unregister(dma_dev: device); |
1267 | |
1268 | list_for_each_entry_safe(chan, n, &device->channels, device_node) |
1269 | __dma_async_device_channel_unregister(device, chan); |
1270 | |
1271 | mutex_lock(&dma_list_mutex); |
1272 | /* |
1273 | * setting DMA_PRIVATE ensures the device being torn down will not |
1274 | * be used in the channel_table |
1275 | */ |
1276 | dma_cap_set(DMA_PRIVATE, device->cap_mask); |
1277 | dma_channel_rebalance(); |
1278 | ida_free(&dma_ida, id: device->dev_id); |
1279 | dma_device_put(device); |
1280 | mutex_unlock(lock: &dma_list_mutex); |
1281 | } |
1282 | EXPORT_SYMBOL(dma_async_device_unregister); |
1283 | |
1284 | static void dmaenginem_async_device_unregister(void *device) |
1285 | { |
1286 | dma_async_device_unregister(device); |
1287 | } |
1288 | |
1289 | /** |
1290 | * dmaenginem_async_device_register - registers DMA devices found |
1291 | * @device: pointer to &struct dma_device |
1292 | * |
1293 | * The operation is managed and will be undone on driver detach. |
1294 | */ |
1295 | int dmaenginem_async_device_register(struct dma_device *device) |
1296 | { |
1297 | int ret; |
1298 | |
1299 | ret = dma_async_device_register(device); |
1300 | if (ret) |
1301 | return ret; |
1302 | |
1303 | return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device); |
1304 | } |
1305 | EXPORT_SYMBOL(dmaenginem_async_device_register); |
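
/*
 * Illustrative sketch (not part of the upstream file) of a provider using
 * the managed registration from its probe(); the callbacks and the dd
 * pointer are hypothetical and the setup is abbreviated:
 *
 *	dma_cap_set(DMA_SLAVE, dd->cap_mask);
 *	dd->device_prep_slave_sg = my_prep_slave_sg;
 *	dd->device_issue_pending = my_issue_pending;
 *	dd->device_tx_status = my_tx_status;
 *	dd->dev = &pdev->dev;
 *	ret = dmaenginem_async_device_register(dd);
 *	if (ret)
 *		return ret;	// unregistration now happens on driver detach
 */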
1306 | |
1307 | struct dmaengine_unmap_pool { |
1308 | struct kmem_cache *cache; |
1309 | const char *name; |
1310 | mempool_t *pool; |
1311 | size_t size; |
1312 | }; |
1313 | |
1314 | #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } |
1315 | static struct dmaengine_unmap_pool unmap_pool[] = { |
1316 | __UNMAP_POOL(2), |
1317 | #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) |
1318 | __UNMAP_POOL(16), |
1319 | __UNMAP_POOL(128), |
1320 | __UNMAP_POOL(256), |
1321 | #endif |
1322 | }; |
1323 | |
1324 | static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) |
1325 | { |
1326 | int order = get_count_order(count: nr); |
1327 | |
1328 | switch (order) { |
1329 | case 0 ... 1: |
1330 | return &unmap_pool[0]; |
1331 | #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) |
1332 | case 2 ... 4: |
1333 | return &unmap_pool[1]; |
1334 | case 5 ... 7: |
1335 | return &unmap_pool[2]; |
1336 | case 8: |
1337 | return &unmap_pool[3]; |
1338 | #endif |
1339 | default: |
1340 | BUG(); |
1341 | return NULL; |
1342 | } |
1343 | } |
1344 | |
1345 | static void dmaengine_unmap(struct kref *kref) |
1346 | { |
1347 | struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); |
1348 | struct device *dev = unmap->dev; |
1349 | int cnt, i; |
1350 | |
1351 | cnt = unmap->to_cnt; |
1352 | for (i = 0; i < cnt; i++) |
1353 | dma_unmap_page(dev, unmap->addr[i], unmap->len, |
1354 | DMA_TO_DEVICE); |
1355 | cnt += unmap->from_cnt; |
1356 | for (; i < cnt; i++) |
1357 | dma_unmap_page(dev, unmap->addr[i], unmap->len, |
1358 | DMA_FROM_DEVICE); |
1359 | cnt += unmap->bidi_cnt; |
1360 | for (; i < cnt; i++) { |
1361 | if (unmap->addr[i] == 0) |
1362 | continue; |
1363 | dma_unmap_page(dev, unmap->addr[i], unmap->len, |
1364 | DMA_BIDIRECTIONAL); |
1365 | } |
1366 | cnt = unmap->map_cnt; |
1367 | mempool_free(element: unmap, pool: __get_unmap_pool(nr: cnt)->pool); |
1368 | } |
1369 | |
1370 | void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) |
1371 | { |
1372 | if (unmap) |
1373 | kref_put(kref: &unmap->kref, release: dmaengine_unmap); |
1374 | } |
1375 | EXPORT_SYMBOL_GPL(dmaengine_unmap_put); |
1376 | |
1377 | static void dmaengine_destroy_unmap_pool(void) |
1378 | { |
1379 | int i; |
1380 | |
1381 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { |
1382 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; |
1383 | |
1384 | mempool_destroy(pool: p->pool); |
1385 | p->pool = NULL; |
1386 | kmem_cache_destroy(s: p->cache); |
1387 | p->cache = NULL; |
1388 | } |
1389 | } |
1390 | |
1391 | static int __init dmaengine_init_unmap_pool(void) |
1392 | { |
1393 | int i; |
1394 | |
1395 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { |
1396 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; |
1397 | size_t size; |
1398 | |
1399 | size = sizeof(struct dmaengine_unmap_data) + |
1400 | sizeof(dma_addr_t) * p->size; |
1401 | |
1402 | p->cache = kmem_cache_create(name: p->name, size, align: 0, |
1403 | SLAB_HWCACHE_ALIGN, NULL); |
1404 | if (!p->cache) |
1405 | break; |
1406 | p->pool = mempool_create_slab_pool(min_nr: 1, kc: p->cache); |
1407 | if (!p->pool) |
1408 | break; |
1409 | } |
1410 | |
1411 | if (i == ARRAY_SIZE(unmap_pool)) |
1412 | return 0; |
1413 | |
1414 | dmaengine_destroy_unmap_pool(); |
1415 | return -ENOMEM; |
1416 | } |
1417 | |
1418 | struct dmaengine_unmap_data * |
1419 | dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) |
1420 | { |
1421 | struct dmaengine_unmap_data *unmap; |
1422 | |
1423 | unmap = mempool_alloc(pool: __get_unmap_pool(nr)->pool, gfp_mask: flags); |
1424 | if (!unmap) |
1425 | return NULL; |
1426 | |
1427 | memset(unmap, 0, sizeof(*unmap)); |
1428 | kref_init(kref: &unmap->kref); |
1429 | unmap->dev = dev; |
1430 | unmap->map_cnt = nr; |
1431 | |
1432 | return unmap; |
1433 | } |
1434 | EXPORT_SYMBOL(dmaengine_get_unmap_data); |
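
/*
 * Illustrative sketch of a driver using the unmap pool for a single
 * src/dst memcpy; dev, src, dst and len are hypothetical. The addresses
 * stored in @addr are unmapped in one go by dmaengine_unmap_put() once
 * all references are dropped:
 *
 *	struct dmaengine_unmap_data *unmap;
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (unmap) {
 *		unmap->len = len;
 *		unmap->to_cnt = 1;
 *		unmap->from_cnt = 1;
 *		unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
 *		unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
 *		... submit the descriptor referencing unmap->addr[] ...
 *		dmaengine_unmap_put(unmap);
 *	}
 */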
1435 | |
1436 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, |
1437 | struct dma_chan *chan) |
1438 | { |
1439 | tx->chan = chan; |
1440 | #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH |
1441 | spin_lock_init(&tx->lock); |
1442 | #endif |
1443 | } |
1444 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); |
1445 | |
1446 | static inline int desc_check_and_set_metadata_mode( |
1447 | struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode) |
1448 | { |
1449 | /* Make sure that the metadata mode is not mixed */ |
1450 | if (!desc->desc_metadata_mode) { |
1451 | if (dmaengine_is_metadata_mode_supported(chan: desc->chan, mode)) |
1452 | desc->desc_metadata_mode = mode; |
1453 | else |
1454 | return -ENOTSUPP; |
1455 | } else if (desc->desc_metadata_mode != mode) { |
1456 | return -EINVAL; |
1457 | } |
1458 | |
1459 | return 0; |
1460 | } |
1461 | |
1462 | int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, |
1463 | void *data, size_t len) |
1464 | { |
1465 | int ret; |
1466 | |
1467 | if (!desc) |
1468 | return -EINVAL; |
1469 | |
1470 | ret = desc_check_and_set_metadata_mode(desc, mode: DESC_METADATA_CLIENT); |
1471 | if (ret) |
1472 | return ret; |
1473 | |
1474 | if (!desc->metadata_ops || !desc->metadata_ops->attach) |
1475 | return -ENOTSUPP; |
1476 | |
1477 | return desc->metadata_ops->attach(desc, data, len); |
1478 | } |
1479 | EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata); |
1480 | |
1481 | void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, |
1482 | size_t *payload_len, size_t *max_len) |
1483 | { |
1484 | int ret; |
1485 | |
1486 | if (!desc) |
1487 | return ERR_PTR(error: -EINVAL); |
1488 | |
1489 | ret = desc_check_and_set_metadata_mode(desc, mode: DESC_METADATA_ENGINE); |
1490 | if (ret) |
1491 | return ERR_PTR(error: ret); |
1492 | |
1493 | if (!desc->metadata_ops || !desc->metadata_ops->get_ptr) |
1494 | return ERR_PTR(error: -ENOTSUPP); |
1495 | |
1496 | return desc->metadata_ops->get_ptr(desc, payload_len, max_len); |
1497 | } |
1498 | EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr); |
1499 | |
1500 | int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, |
1501 | size_t payload_len) |
1502 | { |
1503 | int ret; |
1504 | |
1505 | if (!desc) |
1506 | return -EINVAL; |
1507 | |
1508 | ret = desc_check_and_set_metadata_mode(desc, mode: DESC_METADATA_ENGINE); |
1509 | if (ret) |
1510 | return ret; |
1511 | |
1512 | if (!desc->metadata_ops || !desc->metadata_ops->set_len) |
1513 | return -ENOTSUPP; |
1514 | |
1515 | return desc->metadata_ops->set_len(desc, payload_len); |
1516 | } |
1517 | EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len); |
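
/*
 * Illustrative sketch of the DESC_METADATA_ENGINE flow on the transmit
 * side; desc comes from a prep call and "written" is a hypothetical byte
 * count:
 *
 *	void *buf;
 *	size_t payload, max;
 *
 *	buf = dmaengine_desc_get_metadata_ptr(desc, &payload, &max);
 *	if (!IS_ERR(buf)) {
 *		... write up to max bytes of metadata into buf ...
 *		dmaengine_desc_set_metadata_len(desc, written);
 *	}
 */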
1518 | |
1519 | /** |
1520 | * dma_wait_for_async_tx - spin wait for a transaction to complete |
1521 | * @tx: in-flight transaction to wait on |
1522 | */ |
1523 | enum dma_status |
1524 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) |
1525 | { |
1526 | unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(m: 5000); |
1527 | |
1528 | if (!tx) |
1529 | return DMA_COMPLETE; |
1530 | |
1531 | while (tx->cookie == -EBUSY) { |
1532 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
1533 | dev_err(tx->chan->device->dev, |
1534 | "%s timeout waiting for descriptor submission\n" , |
1535 | __func__); |
1536 | return DMA_ERROR; |
1537 | } |
1538 | cpu_relax(); |
1539 | } |
1540 | return dma_sync_wait(tx->chan, tx->cookie); |
1541 | } |
1542 | EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); |
1543 | |
1544 | /** |
1545 | * dma_run_dependencies - process dependent operations on the target channel |
1546 | * @tx: transaction with dependencies |
1547 | * |
1548 | * Helper routine for DMA drivers to process (start) dependent operations |
1549 | * on their target channel. |
1550 | */ |
1551 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx) |
1552 | { |
1553 | struct dma_async_tx_descriptor *dep = txd_next(txd: tx); |
1554 | struct dma_async_tx_descriptor *dep_next; |
1555 | struct dma_chan *chan; |
1556 | |
1557 | if (!dep) |
1558 | return; |
1559 | |
1560 | /* we'll submit tx->next now, so clear the link */ |
1561 | txd_clear_next(txd: tx); |
1562 | chan = dep->chan; |
1563 | |
1564 | /* keep submitting up until a channel switch is detected |
1565 | * in that case we will be called again as a result of |
1566 | * processing the interrupt from async_tx_channel_switch |
1567 | */ |
1568 | for (; dep; dep = dep_next) { |
1569 | txd_lock(txd: dep); |
1570 | txd_clear_parent(txd: dep); |
1571 | dep_next = txd_next(txd: dep); |
1572 | if (dep_next && dep_next->chan == chan) |
1573 | txd_clear_next(txd: dep); /* ->next will be submitted */ |
1574 | else |
1575 | dep_next = NULL; /* submit current dep and terminate */ |
1576 | txd_unlock(txd: dep); |
1577 | |
1578 | dep->tx_submit(dep); |
1579 | } |
1580 | |
1581 | chan->device->device_issue_pending(chan); |
1582 | } |
1583 | EXPORT_SYMBOL_GPL(dma_run_dependencies); |
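
/*
 * Illustrative sketch (not part of the upstream file): a driver's
 * descriptor clean-up path commonly completes the cookie, unmaps the
 * buffers and then kicks any dependent descriptors:
 *
 *	dma_cookie_complete(txd);
 *	dma_descriptor_unmap(txd);
 *	dma_run_dependencies(txd);
 */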
1584 | |
1585 | static int __init dma_bus_init(void) |
1586 | { |
1587 | int err = dmaengine_init_unmap_pool(); |
1588 | |
1589 | if (err) |
1590 | return err; |
1591 | |
1592 | err = class_register(class: &dma_devclass); |
1593 | if (!err) |
1594 | dmaengine_debugfs_init(); |
1595 | |
1596 | return err; |
1597 | } |
1598 | arch_initcall(dma_bus_init); |
1599 | |