// SPDX-License-Identifier: GPL-2.0
/*
 * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>

#include "musb_core.h"
#include "tusb6010.h"

#define to_chdat(c)	((struct tusb_omap_dma_ch *)(c)->private_data)

#define MAX_DMAREQ	5	/* REVISIT: Really 6, but req5 not OK */
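
/*
 * TUSB_DMA_EP_MAP packs one 5-bit field per dmareq line: bits [3:0] hold
 * the endpoint number and bit 4 the direction (1 = tx), so a zero field
 * means the dmareq line is unused. The dmareq helpers below rely on this
 * layout.
 */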

struct tusb_dma_data {
	s8			dmareq;
	struct dma_chan		*chan;
};

struct tusb_omap_dma_ch {
	struct musb		*musb;
	void __iomem		*tbase;
	unsigned long		phys_offset;
	int			epnum;
	u8			tx;
	struct musb_hw_ep	*hw_ep;

	struct tusb_dma_data	*dma_data;

	struct tusb_omap_dma	*tusb_dma;

	dma_addr_t		dma_addr;

	u32			len;
	u16			packet_sz;
	u16			transfer_packet_sz;
	u32			transfer_len;
	u32			completed_len;
};

struct tusb_omap_dma {
	struct dma_controller		controller;
	void __iomem			*tbase;

	struct tusb_dma_data		dma_pool[MAX_DMAREQ];
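	/* Set for TUSB rev >= 3.0: each endpoint can then use its own dmareq */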
	unsigned			multichannel:1;
};

/*
 * Allocate dmareq0 to the current channel unless it's already taken
 */
static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);

	if (reg != 0) {
		dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
			chdat->epnum, reg & 0xf);
		return -EAGAIN;
	}

	if (chdat->tx)
		reg = (1 << 4) | chdat->epnum;
	else
		reg = chdat->epnum;

	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	return 0;
}

static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);

	if ((reg & 0xf) != chdat->epnum) {
		printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
			chdat->epnum, reg & 0xf);
		return;
	}
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
}

/*
 * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
 * musb_gadget.c.
 */
static void tusb_omap_dma_cb(void *data)
{
	struct dma_channel	*channel = (struct dma_channel *)data;
	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
	struct tusb_omap_dma	*tusb_dma = chdat->tusb_dma;
	struct musb		*musb = chdat->musb;
	struct device		*dev = musb->controller;
	struct musb_hw_ep	*hw_ep = chdat->hw_ep;
	void __iomem		*ep_conf = hw_ep->conf;
	void __iomem		*mbase = musb->mregs;
	unsigned long		remaining, flags, pio;

	spin_lock_irqsave(&musb->lock, flags);

	dev_dbg(musb->controller, "ep%i %s dma callback\n",
		chdat->epnum, chdat->tx ? "tx" : "rx");

	if (chdat->tx)
		remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);

	/* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
	if (unlikely(remaining > chdat->transfer_len)) {
		dev_dbg(musb->controller, "Corrupt %s XFR_SIZE: 0x%08lx\n",
			chdat->tx ? "tx" : "rx", remaining);
		remaining = 0;
	}

	channel->actual_len = chdat->transfer_len - remaining;
	pio = chdat->len - channel->actual_len;

	dev_dbg(musb->controller, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);

	/* Transfer remaining 1 - 31 bytes */
	if (pio > 0 && pio < 32) {
		u8	*buf;

		dev_dbg(musb->controller, "Using PIO for remaining %lu bytes\n", pio);
		buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
		if (chdat->tx) {
			dma_unmap_single(dev, chdat->dma_addr,
					 chdat->transfer_len,
					 DMA_TO_DEVICE);
			musb_write_fifo(hw_ep, pio, buf);
		} else {
			dma_unmap_single(dev, chdat->dma_addr,
					 chdat->transfer_len,
					 DMA_FROM_DEVICE);
			musb_read_fifo(hw_ep, pio, buf);
		}
		channel->actual_len += pio;
	}

	if (!tusb_dma->multichannel)
		tusb_omap_free_shared_dmareq(chdat);

	channel->status = MUSB_DMA_STATUS_FREE;

	musb_dma_completion(musb, chdat->epnum, chdat->tx);

	/* We must terminate short tx transfers manually by setting TXPKTRDY.
	 * REVISIT: This same problem may occur with other MUSB dma as well.
	 * Easy to test with g_ether by pinging the MUSB board with ping -s54.
	 */
	if ((chdat->transfer_len < chdat->packet_sz)
	    || (chdat->transfer_len % chdat->packet_sz != 0)) {
		u16	csr;

		if (chdat->tx) {
			dev_dbg(musb->controller, "terminating short tx packet\n");
			musb_ep_select(mbase, chdat->epnum);
			csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
			csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
			       | MUSB_TXCSR_P_WZC_BITS;
			musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
		}
	}

	spin_unlock_irqrestore(&musb->lock, flags);
}

static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
				u8 rndis_mode, dma_addr_t dma_addr, u32 len)
{
	struct tusb_omap_dma_ch		*chdat = to_chdat(channel);
	struct tusb_omap_dma		*tusb_dma = chdat->tusb_dma;
	struct musb			*musb = chdat->musb;
	struct device			*dev = musb->controller;
	struct musb_hw_ep		*hw_ep = chdat->hw_ep;
	void __iomem			*mbase = musb->mregs;
	void __iomem			*ep_conf = hw_ep->conf;
	dma_addr_t			fifo_addr = hw_ep->fifo_sync;
	u32				dma_remaining;
	u16				csr;
	u32				psize;
	struct tusb_dma_data		*dma_data;
	struct dma_async_tx_descriptor	*dma_desc;
	struct dma_slave_config		dma_cfg;
	enum dma_transfer_direction	dma_dir;
	u32				port_window;
	int				ret;

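	/*
	 * DMA is only attempted for 32..packet_sz byte transfers from a
	 * 16-bit aligned buffer; returning false here makes musb fall back
	 * to PIO for everything else.
	 */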
	if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
		return false;

	/*
	 * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
	 * register which will cause missed DMA interrupt. We could try to
	 * use a timer for the callback, but it is unsafe as the XFR_SIZE
	 * register is corrupt, and we won't know if the DMA worked.
	 */
	if (dma_addr & 0x2)
		return false;

	/*
	 * Because of HW issue #10, it seems like mixing sync DMA and async
	 * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
	 * using the channel for DMA.
	 */
	if (chdat->tx)
		dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
	if (dma_remaining) {
		dev_dbg(musb->controller, "Busy %s dma, not using: %08x\n",
			chdat->tx ? "tx" : "rx", dma_remaining);
		return false;
	}

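	/*
	 * DMA moves only multiples of 32 bytes; the 1 - 31 byte tail is
	 * completed by PIO in tusb_omap_dma_cb().
	 */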
	chdat->transfer_len = len & ~0x1f;

	if (len < packet_sz)
		chdat->transfer_packet_sz = chdat->transfer_len;
	else
		chdat->transfer_packet_sz = packet_sz;

	dma_data = chdat->dma_data;
	if (!tusb_dma->multichannel) {
		if (tusb_omap_use_shared_dmareq(chdat) != 0) {
			dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum);
			return false;
		}
		if (dma_data->dmareq < 0) {
			/* REVISIT: This should get blocked earlier, happens
			 * with MSC ErrorRecoveryTest
			 */
			WARN_ON(1);
			return false;
		}
	}

	chdat->packet_sz = packet_sz;
	chdat->len = len;
	channel->actual_len = 0;
	chdat->dma_addr = dma_addr;
	channel->status = MUSB_DMA_STATUS_BUSY;

	/* Since we're recycling dma areas, we need to clean or invalidate */
	if (chdat->tx) {
		dma_dir = DMA_MEM_TO_DEV;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
			       DMA_TO_DEVICE);
	} else {
		dma_dir = DMA_DEV_TO_MEM;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
			       DMA_FROM_DEVICE);
	}

	memset(&dma_cfg, 0, sizeof(dma_cfg));

	/* Use 16-bit transfer if dma_addr is not 32-bit aligned */
	if ((dma_addr & 0x3) == 0) {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		port_window = 8;
	} else {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		port_window = 16;

		fifo_addr = hw_ep->fifo_async;
	}

	dev_dbg(musb->controller,
		"ep%i %s dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
		chdat->epnum, chdat->tx ? "tx" : "rx", &dma_addr,
		chdat->transfer_len, len, chdat->transfer_packet_sz, packet_sz);

	dma_cfg.src_addr = fifo_addr;
	dma_cfg.dst_addr = fifo_addr;
	dma_cfg.src_port_window_size = port_window;
	dma_cfg.src_maxburst = port_window;
	dma_cfg.dst_port_window_size = port_window;
	dma_cfg.dst_maxburst = port_window;

	ret = dmaengine_slave_config(dma_data->chan, &dma_cfg);
	if (ret) {
		dev_err(musb->controller, "DMA slave config failed: %d\n", ret);
		return false;
	}

	dma_desc = dmaengine_prep_slave_single(dma_data->chan, dma_addr,
					       chdat->transfer_len, dma_dir,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		dev_err(musb->controller, "DMA prep_slave_single failed\n");
		return false;
	}

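	/*
	 * Completion (including the PIO tail and short tx packet handling)
	 * happens in tusb_omap_dma_cb().
	 */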
	dma_desc->callback = tusb_omap_dma_cb;
	dma_desc->callback_param = channel;
	dmaengine_submit(dma_desc);

	dev_dbg(musb->controller,
		"ep%i %s using %i-bit %s dma from %pad to %pad\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		dma_cfg.src_addr_width * 8,
		((dma_addr & 0x3) == 0) ? "sync" : "async",
		(dma_dir == DMA_MEM_TO_DEV) ? &dma_addr : &fifo_addr,
		(dma_dir == DMA_MEM_TO_DEV) ? &fifo_addr : &dma_addr);

	/*
	 * Prepare MUSB for DMA transfer
	 */
	musb_ep_select(mbase, chdat->epnum);
	if (chdat->tx) {
		csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
		csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
		csr |= MUSB_RXCSR_DMAENAB;
		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
		musb_writew(hw_ep->regs, MUSB_RXCSR,
			    csr | MUSB_RXCSR_P_WZC_BITS);
	}

	/* Start DMA transfer */
	dma_async_issue_pending(dma_data->chan);

	if (chdat->tx) {
		/* Send transfer_packet_sz packets at a time */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~0x7ff;
		psize |= chdat->transfer_packet_sz;
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
			    TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	} else {
		/* Receive transfer_packet_sz packets at a time */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~(0x7ff << 16);
		psize |= (chdat->transfer_packet_sz << 16);
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
			    TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	}

	return true;
}

static int tusb_omap_dma_abort(struct dma_channel *channel)
{
	struct tusb_omap_dma_ch *chdat = to_chdat(channel);

	if (chdat->dma_data)
		dmaengine_terminate_all(chdat->dma_data->chan);

	channel->status = MUSB_DMA_STATUS_FREE;

	return 0;
}

static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
	int i, dmareq_nr = -1;

	for (i = 0; i < MAX_DMAREQ; i++) {
		int cur = (reg & (0xf << (i * 5))) >> (i * 5);
		if (cur == 0) {
			dmareq_nr = i;
			break;
		}
	}

	if (dmareq_nr == -1)
		return -EAGAIN;

	reg |= (chdat->epnum << (dmareq_nr * 5));
	if (chdat->tx)
		reg |= ((1 << 4) << (dmareq_nr * 5));
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	chdat->dma_data = &chdat->tusb_dma->dma_pool[dmareq_nr];

	return 0;
}

static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg;

	if (!chdat || !chdat->dma_data || chdat->dma_data->dmareq < 0)
		return;

	reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
	reg &= ~(0x1f << (chdat->dma_data->dmareq * 5));
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	chdat->dma_data = NULL;
}

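/*
 * One musb dma_channel per dmareq line, allocated in
 * tusb_dma_controller_create() and handed out by tusb_omap_dma_allocate().
 */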
static struct dma_channel *dma_channel_pool[MAX_DMAREQ];

static struct dma_channel *
tusb_omap_dma_allocate(struct dma_controller *c,
		       struct musb_hw_ep *hw_ep,
		       u8 tx)
{
	int ret, i;
	struct tusb_omap_dma	*tusb_dma;
	struct musb		*musb;
	struct dma_channel	*channel = NULL;
	struct tusb_omap_dma_ch	*chdat = NULL;
	struct tusb_dma_data	*dma_data = NULL;

	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
	musb = tusb_dma->controller.musb;

	/* REVISIT: Why does dmareq5 not work? */
	if (hw_ep->epnum == 0) {
		dev_dbg(musb->controller, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
		return NULL;
	}

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch = dma_channel_pool[i];
		if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
			ch->status = MUSB_DMA_STATUS_FREE;
			channel = ch;
			chdat = ch->private_data;
			break;
		}
	}

	if (!channel)
		return NULL;

	chdat->musb = tusb_dma->controller.musb;
	chdat->tbase = tusb_dma->tbase;
	chdat->hw_ep = hw_ep;
	chdat->epnum = hw_ep->epnum;
	chdat->completed_len = 0;
	chdat->tusb_dma = tusb_dma;
	if (tx)
		chdat->tx = 1;
	else
		chdat->tx = 0;

	channel->max_len = 0x7fffffff;
	channel->desired_mode = 0;
	channel->actual_len = 0;

	if (!chdat->dma_data) {
		if (tusb_dma->multichannel) {
			ret = tusb_omap_dma_allocate_dmareq(chdat);
			if (ret != 0)
				goto free_dmareq;
		} else {
			chdat->dma_data = &tusb_dma->dma_pool[0];
		}
	}

	dma_data = chdat->dma_data;

	dev_dbg(musb->controller, "ep%i %s dma: %s dmareq%i\n",
		chdat->epnum,
		chdat->tx ? "tx" : "rx",
		tusb_dma->multichannel ? "dedicated" : "shared",
		dma_data->dmareq);

	return channel;

free_dmareq:
	tusb_omap_dma_free_dmareq(chdat);

	dev_dbg(musb->controller, "ep%i: Could not get a DMA channel\n", chdat->epnum);
	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	return NULL;
}

static void tusb_omap_dma_release(struct dma_channel *channel)
{
	struct tusb_omap_dma_ch *chdat = to_chdat(channel);
	struct musb *musb = chdat->musb;

	dev_dbg(musb->controller, "Release for ep%i\n", chdat->epnum);

	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	dmaengine_terminate_sync(chdat->dma_data->chan);
	tusb_omap_dma_free_dmareq(chdat);

	channel = NULL;
}

void tusb_dma_controller_destroy(struct dma_controller *c)
{
	struct tusb_omap_dma *tusb_dma;
	int i;

	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch = dma_channel_pool[i];
		if (ch) {
			kfree(ch->private_data);
			kfree(ch);
		}

		/* Free up the DMA channels */
		if (tusb_dma && tusb_dma->dma_pool[i].chan)
			dma_release_channel(tusb_dma->dma_pool[i].chan);
	}

	kfree(tusb_dma);
}
EXPORT_SYMBOL_GPL(tusb_dma_controller_destroy);

static int tusb_omap_allocate_dma_pool(struct tusb_omap_dma *tusb_dma)
{
	struct musb *musb = tusb_dma->controller.musb;
	int i;
	int ret = 0;

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];

		/*
		 * Request DMA channels:
		 * - one channel in case of non multichannel mode
		 * - MAX_DMAREQ number of channels in multichannel mode
		 */
		if (i == 0 || tusb_dma->multichannel) {
			char ch_name[8];

			sprintf(ch_name, "dmareq%d", i);
			dma_data->chan = dma_request_chan(musb->controller,
							  ch_name);
			if (IS_ERR(dma_data->chan)) {
				dev_err(musb->controller,
					"Failed to request %s\n", ch_name);
				ret = PTR_ERR(dma_data->chan);
				goto dma_error;
			}

			dma_data->dmareq = i;
		} else {
			dma_data->dmareq = -1;
		}
	}

	return 0;

dma_error:
	for (; i >= 0; i--) {
		struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];

		if (dma_data->dmareq >= 0)
			dma_release_channel(dma_data->chan);
	}

	return ret;
}

struct dma_controller *
tusb_dma_controller_create(struct musb *musb, void __iomem *base)
{
	void __iomem *tbase = musb->ctrl_base;
	struct tusb_omap_dma *tusb_dma;
	int i;

	/* REVISIT: Get dmareq lines used from board-*.c */

	musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
	musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);

	musb_writel(tbase, TUSB_DMA_REQ_CONF,
		TUSB_DMA_REQ_CONF_BURST_SIZE(2)
		| TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
		| TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));

	tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
	if (!tusb_dma)
		goto out;

	tusb_dma->controller.musb = musb;
	tusb_dma->tbase = musb->ctrl_base;

	tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
	tusb_dma->controller.channel_release = tusb_omap_dma_release;
	tusb_dma->controller.channel_program = tusb_omap_dma_program;
	tusb_dma->controller.channel_abort = tusb_omap_dma_abort;

	if (musb->tusb_revision >= TUSB_REV_30)
		tusb_dma->multichannel = 1;

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel	*ch;
		struct tusb_omap_dma_ch	*chdat;

		ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
		if (!ch)
			goto cleanup;

		dma_channel_pool[i] = ch;

		chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
		if (!chdat)
			goto cleanup;

		ch->status = MUSB_DMA_STATUS_UNKNOWN;
		ch->private_data = chdat;
	}

	if (tusb_omap_allocate_dma_pool(tusb_dma))
		goto cleanup;

	return &tusb_dma->controller;

cleanup:
	musb_dma_controller_destroy(&tusb_dma->controller);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(tusb_dma_controller_create);