1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2014 Texas Instruments Incorporated |
4 | * Authors: Santosh Shilimkar <santosh.shilimkar@ti.com> |
5 | * Sandeep Nair <sandeep_n@ti.com> |
6 | * Cyril Chemparathy <cyril@ti.com> |
7 | */ |
8 | |
9 | #include <linux/io.h> |
10 | #include <linux/sched.h> |
11 | #include <linux/module.h> |
12 | #include <linux/dma-direction.h> |
13 | #include <linux/interrupt.h> |
14 | #include <linux/pm_runtime.h> |
15 | #include <linux/of_dma.h> |
16 | #include <linux/of_address.h> |
17 | #include <linux/platform_device.h> |
18 | #include <linux/soc/ti/knav_dma.h> |
19 | #include <linux/debugfs.h> |
20 | #include <linux/seq_file.h> |
21 | |
22 | #define REG_MASK 0xffffffff |
23 | |
24 | #define DMA_LOOPBACK BIT(31) |
25 | #define DMA_ENABLE BIT(31) |
26 | #define DMA_TEARDOWN BIT(30) |
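
/*
 * Note: DMA_LOOPBACK is a bit in the global emulation_control register,
 * while DMA_ENABLE/DMA_TEARDOWN live in the per-channel control registers;
 * they merely share bit positions.
 */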
27 | |
28 | #define DMA_TX_FILT_PSWORDS BIT(29) |
29 | #define DMA_TX_FILT_EINFO BIT(30) |
30 | #define DMA_TX_PRIO_SHIFT 0 |
31 | #define DMA_RX_PRIO_SHIFT 16 |
32 | #define DMA_PRIO_MASK GENMASK(3, 0) |
33 | #define DMA_PRIO_DEFAULT 0 |
34 | #define DMA_RX_TIMEOUT_DEFAULT 17500 /* cycles */ |
35 | #define DMA_RX_TIMEOUT_MASK GENMASK(16, 0) |
36 | #define DMA_RX_TIMEOUT_SHIFT 0 |
37 | |
38 | #define CHAN_HAS_EPIB BIT(30) |
39 | #define CHAN_HAS_PSINFO BIT(29) |
40 | #define CHAN_ERR_RETRY BIT(28) |
41 | #define CHAN_PSINFO_AT_SOP BIT(25) |
42 | #define CHAN_SOP_OFF_SHIFT 16 |
43 | #define CHAN_SOP_OFF_MASK GENMASK(9, 0) |
44 | #define DESC_TYPE_SHIFT 26 |
45 | #define DESC_TYPE_MASK GENMASK(2, 0) |
46 | |
47 | /* |
48 | * QMGR & QNUM together make up 14 bits with QMGR as the 2 MSb's in the logical |
49 | * navigator cloud mapping scheme. |
50 | * using the 14bit physical queue numbers directly maps into this scheme. |
51 | */ |
52 | #define CHAN_QNUM_MASK GENMASK(14, 0) |
53 | #define DMA_MAX_QMS 4 |
54 | #define DMA_TIMEOUT 1 /* msecs */ |
55 | #define DMA_INVALID_ID 0xffff |
56 | |
57 | struct reg_global { |
58 | u32 revision; |
59 | u32 perf_control; |
60 | u32 emulation_control; |
61 | u32 priority_control; |
62 | u32 qm_base_address[DMA_MAX_QMS]; |
63 | }; |
64 | |
65 | struct reg_chan { |
66 | u32 control; |
67 | u32 mode; |
68 | u32 __rsvd[6]; |
69 | }; |
70 | |
71 | struct reg_tx_sched { |
72 | u32 prio; |
73 | }; |
74 | |
75 | struct reg_rx_flow { |
76 | u32 control; |
77 | u32 tags; |
78 | u32 tag_sel; |
79 | u32 fdq_sel[2]; |
80 | u32 thresh[3]; |
81 | }; |
82 | |
83 | struct knav_dma_pool_device { |
84 | struct device *dev; |
85 | struct list_head list; |
86 | }; |
87 | |
88 | struct knav_dma_device { |
89 | bool loopback, enable_all; |
90 | unsigned tx_priority, rx_priority, rx_timeout; |
91 | unsigned logical_queue_managers; |
92 | unsigned qm_base_address[DMA_MAX_QMS]; |
93 | struct reg_global __iomem *reg_global; |
94 | struct reg_chan __iomem *reg_tx_chan; |
95 | struct reg_rx_flow __iomem *reg_rx_flow; |
96 | struct reg_chan __iomem *reg_rx_chan; |
97 | struct reg_tx_sched __iomem *reg_tx_sched; |
98 | unsigned max_rx_chan, max_tx_chan; |
99 | unsigned max_rx_flow; |
100 | char name[32]; |
101 | atomic_t ref_count; |
102 | struct list_head list; |
103 | struct list_head chan_list; |
104 | spinlock_t lock; |
105 | }; |
106 | |
107 | struct knav_dma_chan { |
108 | enum dma_transfer_direction direction; |
109 | struct knav_dma_device *dma; |
110 | atomic_t ref_count; |
111 | |
112 | /* registers */ |
113 | struct reg_chan __iomem *reg_chan; |
114 | struct reg_tx_sched __iomem *reg_tx_sched; |
115 | struct reg_rx_flow __iomem *reg_rx_flow; |
116 | |
117 | /* configuration stuff */ |
118 | unsigned channel, flow; |
119 | struct knav_dma_cfg cfg; |
120 | struct list_head list; |
121 | spinlock_t lock; |
122 | }; |
123 | |
124 | #define chan_number(ch) ((ch->direction == DMA_MEM_TO_DEV) ? \ |
125 | ch->channel : ch->flow) |
126 | |
127 | static struct knav_dma_pool_device *kdev; |
128 | |
129 | static bool device_ready; |
130 | bool knav_dma_device_ready(void) |
131 | { |
132 | return device_ready; |
133 | } |
134 | EXPORT_SYMBOL_GPL(knav_dma_device_ready); |
135 | |
136 | static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg) |
137 | { |
	return !memcmp(&chan->cfg, cfg, sizeof(*cfg));
142 | } |
143 | |
144 | static int chan_start(struct knav_dma_chan *chan, |
145 | struct knav_dma_cfg *cfg) |
146 | { |
147 | u32 v = 0; |
148 | |
	spin_lock(&chan->lock);
150 | if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) { |
151 | if (cfg->u.tx.filt_pswords) |
152 | v |= DMA_TX_FILT_PSWORDS; |
153 | if (cfg->u.tx.filt_einfo) |
154 | v |= DMA_TX_FILT_EINFO; |
155 | writel_relaxed(v, &chan->reg_chan->mode); |
156 | writel_relaxed(DMA_ENABLE, &chan->reg_chan->control); |
157 | } |
158 | |
159 | if (chan->reg_tx_sched) |
160 | writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio); |
161 | |
162 | if (chan->reg_rx_flow) { |
163 | v = 0; |
164 | |
165 | if (cfg->u.rx.einfo_present) |
166 | v |= CHAN_HAS_EPIB; |
167 | if (cfg->u.rx.psinfo_present) |
168 | v |= CHAN_HAS_PSINFO; |
169 | if (cfg->u.rx.err_mode == DMA_RETRY) |
170 | v |= CHAN_ERR_RETRY; |
171 | v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT; |
172 | if (cfg->u.rx.psinfo_at_sop) |
173 | v |= CHAN_PSINFO_AT_SOP; |
174 | v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK) |
175 | << CHAN_SOP_OFF_SHIFT; |
176 | v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK; |
177 | |
178 | writel_relaxed(v, &chan->reg_rx_flow->control); |
179 | writel_relaxed(0, &chan->reg_rx_flow->tags); |
180 | writel_relaxed(0, &chan->reg_rx_flow->tag_sel); |
181 | |
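		/*
		 * Each fdq_sel register selects two free-descriptor queues:
		 * fdq[0]/fdq[2] go in the upper 16 bits, fdq[1]/fdq[3] in
		 * the lower 16 bits.
		 */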
182 | v = cfg->u.rx.fdq[0] << 16; |
183 | v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK; |
184 | writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]); |
185 | |
186 | v = cfg->u.rx.fdq[2] << 16; |
187 | v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK; |
188 | writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]); |
189 | |
190 | writel_relaxed(0, &chan->reg_rx_flow->thresh[0]); |
191 | writel_relaxed(0, &chan->reg_rx_flow->thresh[1]); |
192 | writel_relaxed(0, &chan->reg_rx_flow->thresh[2]); |
193 | } |
194 | |
195 | /* Keep a copy of the cfg */ |
196 | memcpy(&chan->cfg, cfg, sizeof(*cfg)); |
	spin_unlock(&chan->lock);
198 | |
199 | return 0; |
200 | } |
201 | |
202 | static int chan_teardown(struct knav_dma_chan *chan) |
203 | { |
204 | unsigned long end, value; |
205 | |
206 | if (!chan->reg_chan) |
207 | return 0; |
208 | |
209 | /* indicate teardown */ |
210 | writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control); |
211 | |
212 | /* wait for the dma to shut itself down */ |
213 | end = jiffies + msecs_to_jiffies(DMA_TIMEOUT); |
214 | do { |
215 | value = readl_relaxed(&chan->reg_chan->control); |
216 | if ((value & DMA_ENABLE) == 0) |
217 | break; |
218 | } while (time_after(end, jiffies)); |
219 | |
220 | if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) { |
		dev_err(kdev->dev, "timeout waiting for teardown\n");
222 | return -ETIMEDOUT; |
223 | } |
224 | |
225 | return 0; |
226 | } |
227 | |
228 | static void chan_stop(struct knav_dma_chan *chan) |
229 | { |
	spin_lock(&chan->lock);
231 | if (chan->reg_rx_flow) { |
232 | /* first detach fdqs, starve out the flow */ |
233 | writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]); |
234 | writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]); |
235 | writel_relaxed(0, &chan->reg_rx_flow->thresh[0]); |
236 | writel_relaxed(0, &chan->reg_rx_flow->thresh[1]); |
237 | writel_relaxed(0, &chan->reg_rx_flow->thresh[2]); |
238 | } |
239 | |
240 | /* teardown the dma channel */ |
241 | chan_teardown(chan); |
242 | |
243 | /* then disconnect the completion side */ |
244 | if (chan->reg_rx_flow) { |
245 | writel_relaxed(0, &chan->reg_rx_flow->control); |
246 | writel_relaxed(0, &chan->reg_rx_flow->tags); |
247 | writel_relaxed(0, &chan->reg_rx_flow->tag_sel); |
248 | } |
249 | |
250 | memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg)); |
	spin_unlock(&chan->lock);

	dev_dbg(kdev->dev, "channel stopped\n");
254 | } |
255 | |
256 | static void dma_hw_enable_all(struct knav_dma_device *dma) |
257 | { |
258 | int i; |
259 | |
260 | for (i = 0; i < dma->max_tx_chan; i++) { |
261 | writel_relaxed(0, &dma->reg_tx_chan[i].mode); |
262 | writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control); |
263 | } |
264 | } |
265 | |
266 | |
267 | static void knav_dma_hw_init(struct knav_dma_device *dma) |
268 | { |
269 | unsigned v; |
270 | int i; |
271 | |
	spin_lock(&dma->lock);
273 | v = dma->loopback ? DMA_LOOPBACK : 0; |
274 | writel_relaxed(v, &dma->reg_global->emulation_control); |
275 | |
276 | v = readl_relaxed(&dma->reg_global->perf_control); |
277 | v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT); |
278 | writel_relaxed(v, &dma->reg_global->perf_control); |
279 | |
280 | v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) | |
281 | (dma->rx_priority << DMA_RX_PRIO_SHIFT)); |
282 | |
283 | writel_relaxed(v, &dma->reg_global->priority_control); |
284 | |
285 | /* Always enable all Rx channels. Rx paths are managed using flows */ |
286 | for (i = 0; i < dma->max_rx_chan; i++) |
287 | writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control); |
288 | |
289 | for (i = 0; i < dma->logical_queue_managers; i++) |
290 | writel_relaxed(dma->qm_base_address[i], |
291 | &dma->reg_global->qm_base_address[i]); |
	spin_unlock(&dma->lock);
293 | } |
294 | |
295 | static void knav_dma_hw_destroy(struct knav_dma_device *dma) |
296 | { |
297 | int i; |
298 | unsigned v; |
299 | |
	spin_lock(&dma->lock);
301 | v = ~DMA_ENABLE & REG_MASK; |
302 | |
303 | for (i = 0; i < dma->max_rx_chan; i++) |
304 | writel_relaxed(v, &dma->reg_rx_chan[i].control); |
305 | |
306 | for (i = 0; i < dma->max_tx_chan; i++) |
307 | writel_relaxed(v, &dma->reg_tx_chan[i].control); |
	spin_unlock(&dma->lock);
309 | } |
310 | |
311 | static void dma_debug_show_channels(struct seq_file *s, |
312 | struct knav_dma_chan *chan) |
313 | { |
314 | int i; |
315 | |
	seq_printf(s, "\t%s %d:\t",
		   ((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"),
		   chan_number(chan));
319 | |
320 | if (chan->direction == DMA_MEM_TO_DEV) { |
		seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n",
			   chan->cfg.u.tx.filt_einfo,
			   chan->cfg.u.tx.filt_pswords,
			   chan->cfg.u.tx.priority);
	} else {
		seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n",
			   chan->cfg.u.rx.einfo_present,
			   chan->cfg.u.rx.psinfo_present,
			   chan->cfg.u.rx.desc_type);
		seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ",
			   chan->cfg.u.rx.dst_q,
			   chan->cfg.u.rx.thresh);
		for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
			seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
		seq_printf(s, "\n");
	}
336 | } |
337 | } |
338 | |
339 | static void dma_debug_show_devices(struct seq_file *s, |
340 | struct knav_dma_device *dma) |
341 | { |
342 | struct knav_dma_chan *chan; |
343 | |
344 | list_for_each_entry(chan, &dma->chan_list, list) { |
		if (atomic_read(&chan->ref_count))
346 | dma_debug_show_channels(s, chan); |
347 | } |
348 | } |
349 | |
350 | static int knav_dma_debug_show(struct seq_file *s, void *v) |
351 | { |
352 | struct knav_dma_device *dma; |
353 | |
354 | list_for_each_entry(dma, &kdev->list, list) { |
		if (atomic_read(&dma->ref_count)) {
			seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
357 | dma->name, dma->max_tx_chan, dma->max_rx_flow); |
358 | dma_debug_show_devices(s, dma); |
359 | } |
360 | } |
361 | |
362 | return 0; |
363 | } |
364 | |
365 | DEFINE_SHOW_ATTRIBUTE(knav_dma_debug); |
366 | |
367 | static int of_channel_match_helper(struct device_node *np, const char *name, |
368 | const char **dma_instance) |
369 | { |
370 | struct of_phandle_args args; |
371 | struct device_node *dma_node; |
372 | int index; |
373 | |
	dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0);
375 | if (!dma_node) |
376 | return -ENODEV; |
377 | |
378 | *dma_instance = dma_node->name; |
	index = of_property_match_string(np, "ti,navigator-dma-names", name);
	if (index < 0) {
		dev_err(kdev->dev, "No 'ti,navigator-dma-names' property\n");
382 | return -ENODEV; |
383 | } |
384 | |
	if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas",
					     1, index, &args)) {
		dev_err(kdev->dev, "Missing the phandle args name %s\n", name);
388 | return -ENODEV; |
389 | } |
390 | |
391 | if (args.args[0] < 0) { |
		dev_err(kdev->dev, "Missing args for %s\n", name);
393 | return -ENODEV; |
394 | } |
395 | |
396 | return args.args[0]; |
397 | } |
398 | |
399 | /** |
400 | * knav_dma_open_channel() - try to setup an exclusive slave channel |
401 | * @dev: pointer to client device structure |
402 | * @name: slave channel name |
403 | * @config: dma configuration parameters |
404 | * |
405 | * Returns pointer to appropriate DMA channel on success or error. |
406 | */ |
407 | void *knav_dma_open_channel(struct device *dev, const char *name, |
408 | struct knav_dma_cfg *config) |
409 | { |
410 | struct knav_dma_device *dma = NULL, *iter1; |
411 | struct knav_dma_chan *chan = NULL, *iter2; |
412 | int chan_num = -1; |
413 | const char *instance; |
414 | |
415 | if (!kdev) { |
		pr_err("keystone-navigator-dma driver not registered\n");
417 | return (void *)-EINVAL; |
418 | } |
419 | |
	chan_num = of_channel_match_helper(dev->of_node, name, &instance);
	if (chan_num < 0) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", name);
423 | return (void *)-EINVAL; |
424 | } |
425 | |
	dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
		config->direction == DMA_MEM_TO_DEV ? "transmit" :
		config->direction == DMA_DEV_TO_MEM ? "receive" :
		"unknown", chan_num, instance);
430 | |
431 | if (config->direction != DMA_MEM_TO_DEV && |
432 | config->direction != DMA_DEV_TO_MEM) { |
		dev_err(kdev->dev, "bad direction\n");
434 | return (void *)-EINVAL; |
435 | } |
436 | |
437 | /* Look for correct dma instance */ |
438 | list_for_each_entry(iter1, &kdev->list, list) { |
439 | if (!strcmp(iter1->name, instance)) { |
440 | dma = iter1; |
441 | break; |
442 | } |
443 | } |
444 | if (!dma) { |
		dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
446 | return (void *)-EINVAL; |
447 | } |
448 | |
449 | /* Look for correct dma channel from dma instance */ |
450 | list_for_each_entry(iter2, &dma->chan_list, list) { |
451 | if (config->direction == DMA_MEM_TO_DEV) { |
452 | if (iter2->channel == chan_num) { |
453 | chan = iter2; |
454 | break; |
455 | } |
456 | } else { |
457 | if (iter2->flow == chan_num) { |
458 | chan = iter2; |
459 | break; |
460 | } |
461 | } |
462 | } |
463 | if (!chan) { |
		dev_err(kdev->dev, "channel %d is not in DMA %s\n",
465 | chan_num, instance); |
466 | return (void *)-EINVAL; |
467 | } |
468 | |
	if (atomic_read(&chan->ref_count) >= 1) {
		if (!check_config(chan, config)) {
			dev_err(kdev->dev, "channel %d config mismatch\n",
472 | chan_num); |
473 | return (void *)-EINVAL; |
474 | } |
475 | } |
476 | |
	if (atomic_inc_return(&chan->dma->ref_count) <= 1)
		knav_dma_hw_init(chan->dma);

	if (atomic_inc_return(&chan->ref_count) <= 1)
		chan_start(chan, config);
482 | |
	dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
484 | chan_num, instance); |
485 | |
486 | return chan; |
487 | } |
488 | EXPORT_SYMBOL_GPL(knav_dma_open_channel); |
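
/*
 * Hedged usage sketch (illustrative, not taken from this file): a client
 * pairs knav_dma_open_channel() with knav_dma_close_channel() roughly as:
 *
 *	struct knav_dma_cfg config = { .direction = DMA_MEM_TO_DEV };
 *	void *chan;
 *
 *	config.u.tx.priority = DMA_PRIO_DEFAULT;
 *	chan = knav_dma_open_channel(dev, "nettx", &config);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	knav_dma_close_channel(chan);
 *
 * The error return here is a negative errno cast to a pointer, so IS_ERR()
 * works on it.
 */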
489 | |
490 | /** |
491 | * knav_dma_close_channel() - Destroy a dma channel |
492 | * |
493 | * @channel: dma channel handle |
494 | * |
495 | */ |
496 | void knav_dma_close_channel(void *channel) |
497 | { |
498 | struct knav_dma_chan *chan = channel; |
499 | |
500 | if (!kdev) { |
		pr_err("keystone-navigator-dma driver not registered\n");
502 | return; |
503 | } |
504 | |
	if (atomic_dec_return(&chan->ref_count) <= 0)
		chan_stop(chan);

	if (atomic_dec_return(&chan->dma->ref_count) <= 0)
		knav_dma_hw_destroy(chan->dma);
510 | |
	dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
512 | chan->channel, chan->flow, chan->dma->name); |
513 | } |
514 | EXPORT_SYMBOL_GPL(knav_dma_close_channel); |
515 | |
516 | static void __iomem *pktdma_get_regs(struct knav_dma_device *dma, |
517 | struct device_node *node, |
518 | unsigned index, resource_size_t *_size) |
519 | { |
520 | struct device *dev = kdev->dev; |
521 | struct resource res; |
522 | void __iomem *regs; |
523 | int ret; |
524 | |
	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(dev, "Can't translate of node(%pOFn) address for index(%d)\n",
			node, index);
		return ERR_PTR(ret);
530 | } |
531 | |
	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(dev, "Failed to map register base for index(%d) node(%pOFn)\n",
			index, node);
	if (_size)
		*_size = resource_size(&res);
538 | |
539 | return regs; |
540 | } |
541 | |
542 | static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow) |
543 | { |
544 | struct knav_dma_device *dma = chan->dma; |
545 | |
546 | chan->flow = flow; |
547 | chan->reg_rx_flow = dma->reg_rx_flow + flow; |
548 | chan->channel = DMA_INVALID_ID; |
	dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow);
550 | |
551 | return 0; |
552 | } |
553 | |
554 | static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel) |
555 | { |
556 | struct knav_dma_device *dma = chan->dma; |
557 | |
558 | chan->channel = channel; |
559 | chan->reg_chan = dma->reg_tx_chan + channel; |
560 | chan->reg_tx_sched = dma->reg_tx_sched + channel; |
561 | chan->flow = DMA_INVALID_ID; |
	dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan);
563 | |
564 | return 0; |
565 | } |
566 | |
567 | static int pktdma_init_chan(struct knav_dma_device *dma, |
568 | enum dma_transfer_direction dir, |
569 | unsigned chan_num) |
570 | { |
571 | struct device *dev = kdev->dev; |
572 | struct knav_dma_chan *chan; |
573 | int ret = -EINVAL; |
574 | |
	chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
576 | if (!chan) |
577 | return -ENOMEM; |
578 | |
	INIT_LIST_HEAD(&chan->list);
	chan->dma = dma;
	chan->direction = DMA_TRANS_NONE;
	atomic_set(&chan->ref_count, 0);
	spin_lock_init(&chan->lock);
584 | |
	if (dir == DMA_MEM_TO_DEV) {
		chan->direction = dir;
		ret = pktdma_init_tx_chan(chan, chan_num);
	} else if (dir == DMA_DEV_TO_MEM) {
		chan->direction = dir;
		ret = pktdma_init_rx_chan(chan, chan_num);
	} else {
		dev_err(dev, "channel(%d) direction unknown\n", chan_num);
593 | } |
594 | |
	list_add_tail(&chan->list, &dma->chan_list);
596 | |
597 | return ret; |
598 | } |
599 | |
600 | static int dma_init(struct device_node *cloud, struct device_node *dma_node) |
601 | { |
602 | unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched; |
603 | struct device_node *node = dma_node; |
604 | struct knav_dma_device *dma; |
605 | int ret, len, num_chan = 0; |
606 | resource_size_t size; |
607 | u32 timeout; |
608 | u32 i; |
609 | |
	dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		dev_err(kdev->dev, "could not allocate driver mem\n");
613 | return -ENOMEM; |
614 | } |
	INIT_LIST_HEAD(&dma->list);
	INIT_LIST_HEAD(&dma->chan_list);
617 | |
	if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
		dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
620 | return -ENODEV; |
621 | } |
622 | |
623 | dma->logical_queue_managers = len / sizeof(u32); |
624 | if (dma->logical_queue_managers > DMA_MAX_QMS) { |
		dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n",
626 | dma->logical_queue_managers); |
627 | dma->logical_queue_managers = DMA_MAX_QMS; |
628 | } |
629 | |
	ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
					 dma->qm_base_address,
					 dma->logical_queue_managers);
	if (ret) {
		dev_err(kdev->dev, "invalid navigator cloud addresses\n");
635 | return -ENODEV; |
636 | } |
637 | |
	dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
	if (IS_ERR(dma->reg_global))
		return PTR_ERR(dma->reg_global);
	if (size < sizeof(struct reg_global)) {
		dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
643 | return -ENODEV; |
644 | } |
645 | |
	dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
	if (IS_ERR(dma->reg_tx_chan))
		return PTR_ERR(dma->reg_tx_chan);
649 | |
650 | max_tx_chan = size / sizeof(struct reg_chan); |
	dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
	if (IS_ERR(dma->reg_rx_chan))
		return PTR_ERR(dma->reg_rx_chan);
654 | |
655 | max_rx_chan = size / sizeof(struct reg_chan); |
	dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
	if (IS_ERR(dma->reg_tx_sched))
		return PTR_ERR(dma->reg_tx_sched);
659 | |
660 | max_tx_sched = size / sizeof(struct reg_tx_sched); |
	dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
	if (IS_ERR(dma->reg_rx_flow))
		return PTR_ERR(dma->reg_rx_flow);
664 | |
665 | max_rx_flow = size / sizeof(struct reg_rx_flow); |
666 | dma->rx_priority = DMA_PRIO_DEFAULT; |
667 | dma->tx_priority = DMA_PRIO_DEFAULT; |
668 | |
	dma->enable_all = of_property_read_bool(node, "ti,enable-all");
	dma->loopback = of_property_read_bool(node, "ti,loop-back");

	ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
673 | if (ret < 0) { |
		dev_dbg(kdev->dev, "unspecified rx timeout using value %d\n",
675 | DMA_RX_TIMEOUT_DEFAULT); |
676 | timeout = DMA_RX_TIMEOUT_DEFAULT; |
677 | } |
678 | |
679 | dma->rx_timeout = timeout; |
680 | dma->max_rx_chan = max_rx_chan; |
681 | dma->max_rx_flow = max_rx_flow; |
682 | dma->max_tx_chan = min(max_tx_chan, max_tx_sched); |
	atomic_set(&dma->ref_count, 0);
	strcpy(dma->name, node->name);
685 | spin_lock_init(&dma->lock); |
686 | |
687 | for (i = 0; i < dma->max_tx_chan; i++) { |
		if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0)
689 | num_chan++; |
690 | } |
691 | |
692 | for (i = 0; i < dma->max_rx_flow; i++) { |
		if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0)
694 | num_chan++; |
695 | } |
696 | |
	list_add_tail(&dma->list, &kdev->list);
698 | |
699 | /* |
700 | * For DSP software usecases or userpace transport software, setup all |
701 | * the DMA hardware resources. |
702 | */ |
703 | if (dma->enable_all) { |
		atomic_inc(&dma->ref_count);
		knav_dma_hw_init(dma);
		dma_hw_enable_all(dma);
707 | } |
708 | |
	dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n",
		 dma->name, num_chan, dma->max_rx_flow,
		 dma->max_tx_chan, dma->max_rx_chan,
		 dma->loopback ? ", loopback" : "");
713 | |
714 | return 0; |
715 | } |
716 | |
717 | static int knav_dma_probe(struct platform_device *pdev) |
718 | { |
719 | struct device *dev = &pdev->dev; |
720 | struct device_node *node = pdev->dev.of_node; |
721 | struct device_node *child; |
722 | int ret = 0; |
723 | |
724 | if (!node) { |
		dev_err(&pdev->dev, "could not find device info\n");
726 | return -EINVAL; |
727 | } |
728 | |
	kdev = devm_kzalloc(dev,
			    sizeof(struct knav_dma_pool_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "could not allocate driver mem\n");
733 | return -ENOMEM; |
734 | } |
735 | |
736 | kdev->dev = dev; |
	INIT_LIST_HEAD(&kdev->list);
738 | |
	pm_runtime_enable(kdev->dev);
	ret = pm_runtime_resume_and_get(kdev->dev);
	if (ret < 0) {
		dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
743 | goto err_pm_disable; |
744 | } |
745 | |
746 | /* Initialise all packet dmas */ |
747 | for_each_child_of_node(node, child) { |
		ret = dma_init(node, child);
		if (ret) {
			of_node_put(child);
			dev_err(&pdev->dev, "init failed with %d\n", ret);
752 | break; |
753 | } |
754 | } |
755 | |
	if (list_empty(&kdev->list)) {
		dev_err(dev, "no valid dma instance\n");
758 | ret = -ENODEV; |
759 | goto err_put_sync; |
760 | } |
761 | |
	debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_dma_debug_fops);
764 | |
765 | device_ready = true; |
766 | return ret; |
767 | |
768 | err_put_sync: |
	pm_runtime_put_sync(kdev->dev);
err_pm_disable:
	pm_runtime_disable(kdev->dev);
772 | |
773 | return ret; |
774 | } |
775 | |
776 | static void knav_dma_remove(struct platform_device *pdev) |
777 | { |
778 | struct knav_dma_device *dma; |
779 | |
780 | list_for_each_entry(dma, &kdev->list, list) { |
		if (atomic_dec_return(&dma->ref_count) == 0)
			knav_dma_hw_destroy(dma);
	}

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
787 | } |
788 | |
static const struct of_device_id of_match[] = {
	{ .compatible = "ti,keystone-navigator-dma", },
791 | {}, |
792 | }; |
793 | |
794 | MODULE_DEVICE_TABLE(of, of_match); |
795 | |
796 | static struct platform_driver knav_dma_driver = { |
797 | .probe = knav_dma_probe, |
798 | .remove_new = knav_dma_remove, |
799 | .driver = { |
		.name = "keystone-navigator-dma",
801 | .of_match_table = of_match, |
802 | }, |
803 | }; |
804 | module_platform_driver(knav_dma_driver); |
805 | |
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
810 | |