// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
#include "musb_trace.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ... has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or isochronous.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse. That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses. (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins. (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled. No multiplexing
 * of transfers between endpoints, or anything clever.
 */
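
/*
 * For example (a sketch, not a fixed rule -- assuming a core with five
 * hardware endpoints): ep0 handles control, hardware ep1 serves as the
 * dedicated bulk IN+OUT pair, and ep2..ep4 each get "claimed" by one
 * periodic (interrupt or isochronous) qh, e.g. hub + keybd + mouse.
 */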

struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/*
		 * FIXME: sometimes the tx fifo flush failed, it has been
		 * observed during device disconnect on AM335x.
		 *
		 * To reproduce the issue, ensure tx urb(s) are queued when
		 * unplug the usb device which is connected to AM335x usb
		 * host port.
		 *
		 * I found using a usb-ethernet device and running iperf
		 * (client on AM335x) has very high chance to trigger it.
		 *
		 * Better to turn on musb_dbg() in musb_cleanup_urb() with
		 * CPPI enabled to see the issue when aborting the tx channel.
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}

}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled(ep->musb))
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}
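
/*
 * NOTE: for shared-FIFO endpoints musb_ep_set_qh() records the qh under
 * both in_qh and out_qh, so musb_ep_get_qh() finds the active qh no
 * matter which direction the caller asks about.
 */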

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	trace_musb_urb_start(musb, urb);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb_dbg(musb, "check whether there's still time for periodic Tx");
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if (1) {	/* Always assume URB_ISO_ASAP */
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			musb_dbg(musb, "SOF for %d", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		musb_dbg(musb, "Start TX%d %s", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	trace_musb_urb_gb(musb, urb);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}
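
/*
 * The lock is dropped across usb_hcd_giveback_urb() because the URB's
 * completion handler may re-enter this driver (to resubmit or unlink),
 * which would deadlock on musb->lock; the __releases/__acquires
 * annotations above document that window. Callers therefore must not
 * assume qh state survives musb_giveback() -- see the re-fetch in
 * musb_advance_schedule() below.
 */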

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;
	u16			toggle;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		toggle = musb->io.get_toggle(qh, !is_in);
		usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/*
	 * musb->lock had been unlocked in musb_giveback, so qh may
	 * be freed, need to get it again
	 */
	qh = musb_ep_get_qh(hw_ep, is_in);

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (qh && list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			fallthrough;

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		musb_dbg(musb, "... next ep%d %cX urb %p",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	}
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	musb_writew(ep->regs, MUSB_RXMAXP,
			qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

static void musb_tx_dma_set_mode_mentor(struct musb_hw_ep *hw_ep,
					struct musb_qh *qh,
					u32 *length, u8 *mode)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to table
		 * below
		 * bulk_split hb_mult	Autoset_Enable
		 *	0	1	Yes(Normal)
		 *	0	>1	No(High BW ISO)
		 *	1	1	Yes(HS bulk)
		 *	1	>1	Yes(FS bulk)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}

static void musb_tx_dma_set_mode_cppi_tusb(struct musb_hw_ep *hw_ep,
					   struct urb *urb,
					   u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
}

static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	u16			pkt_size = qh->maxpacket;
	u8			mode;

	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(hw_ep, qh,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(hw_ep, urb, &mode);
	else
		return false;

	qh->segsize = length;

	/*
	 * Ensure the data reaches to main memory before starting
	 * DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
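
/*
 * NOTE: on failure musb_tx_dma_program() releases the channel and clears
 * the DMA bits in TXCSR before returning false, so its caller in
 * musb_ep_program() transparently falls back to loading the FIFO by PIO
 * (the load_count path).
 */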

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid
			 * packets in double buffering
			 * case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered)
				csr |= musb->io.set_toggle(qh, is_out, urb);

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
						/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
						"error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16 csr = 0;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, epnum);
			csr |= musb->io.set_toggle(qh, is_out, urb);

			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;
	u16			toggle;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/*
		 * Need to stop the transaction by clearing REQPKT first
		 * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
		 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
		 */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, rx_csr);
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		toggle = musb->io.get_toggle(cur_qh, !is_in);
		usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}

		if (next_qh)
			musb_start_urb(musb, is_in, next_qh);
	}
}
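
/*
 * NOTE: musb_bulk_nak_timeout() saves the data toggle via usb_settoggle()
 * *before* rotating the qh to the tail of the ring, so when the NAKing
 * endpoint is scheduled again it resumes with the correct DATA0/DATA1
 * sequence.
 */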

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			musb_dbg(musb, "start no-DATA");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			musb_dbg(musb, "start IN-DATA");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			musb_dbg(musb, "start OUT-DATA");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		fallthrough;
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
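
/*
 * The ep0 stage machine driven by musb_h_ep0_continue() above and
 * musb_h_ep0_irq() below proceeds MUSB_EP0_START (SETUP sent) ->
 * MUSB_EP0_IN or MUSB_EP0_OUT (data stage, skipped when wLength is 0) ->
 * MUSB_EP0_STATUS -> MUSB_EP0_IDLE.
 */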

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		musb_dbg(musb, "STALLING ENDPOINT");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		musb_dbg(musb, "no response, csr0 %04x", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		musb_dbg(musb, "control NAK timeout");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy. That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		musb_dbg(musb, "aborting");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* disable ping token in status phase */
			csr |= MUSB_CSR0_H_DIS_PING;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			musb_dbg(musb, "ep0 STATUS, csr %04x", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif
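
/*
 * The mode-1 handshake sketched above is what the DMAMODE-clearing
 * sequence in musb_host_tx() below implements: after DMA completion,
 * switch back to mode 0 and wait for the terminal TXPKTRDY interrupt
 * before retiring the URB.
 */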

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	trace_musb_urb_tx(musb, urb);
	musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		musb_dbg(musb, "TX end %d stall", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		musb_dbg(musb, "TX 3strikes on ep=%d", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			musb_dbg(musb, "TX ep%d device not responding", epnum);
			/* NOTE: this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo. (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed. But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data. So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order. That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			musb_dbg(musb,
				"DMA complete but FIFO not empty, CSR %04x",
				tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		musb_dbg(musb, "not complete, but DMA enabled?");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}

#ifdef CONFIG_USB_TI_CPPI41_DMA
/* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
				  struct musb_hw_ep *hw_ep,
				  struct musb_qh *qh,
				  struct urb *urb,
				  size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	dma_addr_t *buf;
	u32 length;
	u16 val;

	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
	      (u32)urb->transfer_dma;

	length = urb->iso_frame_desc[qh->iso_idx].length;

	val = musb_readw(epio, MUSB_RXCSR);
	val |= MUSB_RXCSR_DMAENAB;
	musb_writew(hw_ep->regs, MUSB_RXCSR, val);

	return dma->channel_program(channel, qh->maxpacket, 0,
				   (u32)buf, length);
}
#else
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
					 struct musb_hw_ep *hw_ep,
					 struct musb_qh *qh,
					 struct urb *urb,
					 size_t len)
{
	return false;
}
#endif

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
	defined(CONFIG_USB_TI_CPPI41_DMA)
/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */
static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
				       struct musb_hw_ep *hw_ep,
				       struct musb_qh *qh,
				       struct urb *urb,
				       size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 val;
	int pipe;
	bool done;

	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;
		d->actual_length = len;

		/* even if there was an error, we did the dma
		 * for iso_frame_desc->length
		 */
		if (d->status != -EILSEQ && d->status != -EOVERFLOW)
			d->status = 0;

		if (++qh->iso_idx >= urb->number_of_packets) {
			done = true;
		} else {
			/* REVISIT: Why ignore return value here? */
			if (musb_dma_cppi41(hw_ep->musb))
				done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
							      urb, len);
			done = false;
		}

	} else {
		/* done if urb buffer is full or short packet is recd */
		done = (urb->actual_length + len >=
			urb->transfer_buffer_length
			|| channel->actual_len < qh->maxpacket
			|| channel->rx_packet_done);
	}

	/* send IN token for next packet, without AUTOREQ */
	if (!done) {
		val = musb_readw(epio, MUSB_RXCSR);
		val |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
	}

	return done;
}

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
					  struct musb_hw_ep *hw_ep,
					  struct musb_qh *qh,
					  struct urb *urb,
					  size_t len,
					  u8 iso_err)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	struct dma_channel *channel = hw_ep->rx_channel;
	u16 rx_count, val;
	int length, pipe, done;
	dma_addr_t buf;

	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		int d_status = 0;
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;

		if (iso_err) {
			d_status = -EILSEQ;
			urb->error_count++;
		}
		if (rx_count > d->length) {
			if (d_status == 0) {
				d_status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "** OVERFLOW %d into %d",
				rx_count, d->length);

			length = d->length;
		} else
			length = rx_count;
		d->status = d_status;
		buf = urb->transfer_dma + d->offset;
	} else {
		length = rx_count;
		buf = urb->transfer_dma + urb->actual_length;
	}

	channel->desired_mode = 0;
#ifdef USE_MODE1
	/* because of the issue below, mode 1 will
	 * only rarely behave with correct semantics.
	 */
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
	    && (urb->transfer_buffer_length - urb->actual_length)
	    > qh->maxpacket)
		channel->desired_mode = 1;
	if (rx_count < hw_ep->max_packet_sz_rx) {
		length = rx_count;
		channel->desired_mode = 0;
	} else {
		length = urb->transfer_buffer_length;
	}
#endif

	/* See comments above on disadvantages of using mode 1 */
	val = musb_readw(epio, MUSB_RXCSR);
	val &= ~MUSB_RXCSR_H_REQPKT;

	if (channel->desired_mode == 0)
		val &= ~MUSB_RXCSR_H_AUTOREQ;
	else
		val |= MUSB_RXCSR_H_AUTOREQ;
	val |= MUSB_RXCSR_DMAENAB;

	/* autoclear shouldn't be set in high bandwidth */
	if (qh->hb_mult == 1)
		val |= MUSB_RXCSR_AUTOCLEAR;

	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);

	/* REVISIT if when actual_length != 0,
	 * transfer_buffer_length needs to be
	 * adjusted first...
	 */
	done = dma->channel_program(channel, qh->maxpacket,
				   channel->desired_mode,
				   buf, length);

	if (!done) {
		dma->channel_release(channel);
		hw_ep->rx_channel = NULL;
		channel = NULL;
		val = musb_readw(epio, MUSB_RXCSR);
		val &= ~(MUSB_RXCSR_DMAENAB
			 | MUSB_RXCSR_H_AUTOREQ
			 | MUSB_RXCSR_AUTOCLEAR);
		musb_writew(epio, MUSB_RXCSR, val);
	}

	return done;
}
#else
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
					      struct musb_hw_ep *hw_ep,
					      struct musb_qh *qh,
					      struct urb *urb,
					      size_t len)
{
	return false;
}

static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
						 struct musb_hw_ep *hw_ep,
						 struct musb_qh *qh,
						 struct urb *urb,
						 size_t len,
						 u8 iso_err)
{
	return false;
}
#endif

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	struct dma_controller	*c = musb->dma_controller;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1757 | * usbtest #11 (unlinks) triggers it regularly, sometimes |
1758 | * with fifo full. (Only with DMA??) |
1759 | */ |
1760 | musb_dbg(musb, fmt: "BOGUS RX%d ready, csr %04x, count %d" , |
1761 | epnum, val, musb_readw(epio, MUSB_RXCOUNT)); |
1762 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); |
1763 | return; |
1764 | } |
1765 | |
1766 | trace_musb_urb_rx(musb, urb); |
1767 | |
	/* check for errors; concurrent stall & unlink are not really
	 * handled yet! */
1770 | if (rx_csr & MUSB_RXCSR_H_RXSTALL) { |
1771 | musb_dbg(musb, fmt: "RX end %d STALL" , epnum); |
1772 | |
1773 | /* stall; record URB status */ |
1774 | status = -EPIPE; |
1775 | |
1776 | } else if (rx_csr & MUSB_RXCSR_H_ERROR) { |
1777 | dev_err(musb->controller, "ep%d RX three-strikes error" , epnum); |
1778 | |
1779 | /* |
		 * The three-strikes error can only happen when the USB
		 * device is not accessible, for example detached or powered
		 * off. Return the fatal error -ESHUTDOWN so that USB device
		 * drivers won't immediately resubmit the same URB.
1784 | */ |
1785 | status = -ESHUTDOWN; |
1786 | musb_writeb(epio, MUSB_RXINTERVAL, 0); |
1787 | |
1788 | rx_csr &= ~MUSB_RXCSR_H_ERROR; |
1789 | musb_writew(epio, MUSB_RXCSR, rx_csr); |
1790 | |
1791 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { |
1792 | |
1793 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { |
1794 | musb_dbg(musb, fmt: "RX end %d NAK timeout" , epnum); |
1795 | |
1796 | /* NOTE: NAKing is *NOT* an error, so we want to |
1797 | * continue. Except ... if there's a request for |
1798 | * another QH, use that instead of starving it. |
1799 | * |
1800 | * Devices like Ethernet and serial adapters keep |
1801 | * reads posted at all times, which will starve |
1802 | * other devices without this logic. |
1803 | */ |
1804 | if (usb_pipebulk(urb->pipe) |
1805 | && qh->mux == 1 |
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
1808 | return; |
1809 | } |
1810 | musb_ep_select(mbase, epnum); |
1811 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; |
1812 | rx_csr &= ~MUSB_RXCSR_DATAERROR; |
1813 | musb_writew(epio, MUSB_RXCSR, rx_csr); |
1814 | |
1815 | goto finish; |
1816 | } else { |
1817 | musb_dbg(musb, fmt: "RX end %d ISO data error" , epnum); |
1818 | /* packet error reported later */ |
1819 | iso_err = true; |
1820 | } |
1821 | } else if (rx_csr & MUSB_RXCSR_INCOMPRX) { |
1822 | musb_dbg(musb, fmt: "end %d high bandwidth incomplete ISO packet RX" , |
1823 | epnum); |
1824 | status = -EPROTO; |
1825 | } |
1826 | |
1827 | /* faults abort the transfer */ |
1828 | if (status) { |
1829 | /* clean up dma and collect transfer count */ |
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1831 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; |
1832 | musb->dma_controller->channel_abort(dma); |
1833 | xfer_len = dma->actual_len; |
1834 | } |
1835 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); |
1836 | musb_writeb(epio, MUSB_RXINTERVAL, 0); |
1837 | done = true; |
1838 | goto finish; |
1839 | } |
1840 | |
1841 | if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { |
1842 | /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ |
1843 | ERR("RX%d dma busy, csr %04x\n" , epnum, rx_csr); |
1844 | goto finish; |
1845 | } |
1846 | |
1847 | /* thorough shutdown for now ... given more precise fault handling |
1848 | * and better queueing support, we might keep a DMA pipeline going |
1849 | * while processing this irq for earlier completions. |
1850 | */ |
1851 | |
1852 | /* FIXME this is _way_ too much in-line logic for Mentor DMA */ |
1853 | if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) && |
1854 | (rx_csr & MUSB_RXCSR_H_REQPKT)) { |
1855 | /* REVISIT this happened for a while on some short reads... |
1856 | * the cleanup still needs investigation... looks bad... |
1857 | * and also duplicates dma cleanup code above ... plus, |
1858 | * shouldn't this be the "half full" double buffer case? |
1859 | */ |
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1861 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; |
1862 | musb->dma_controller->channel_abort(dma); |
1863 | xfer_len = dma->actual_len; |
1864 | done = true; |
1865 | } |
1866 | |
1867 | musb_dbg(musb, fmt: "RXCSR%d %04x, reqpkt, len %zu%s" , epnum, rx_csr, |
1868 | xfer_len, dma ? ", dma" : "" ); |
1869 | rx_csr &= ~MUSB_RXCSR_H_REQPKT; |
1870 | |
1871 | musb_ep_select(mbase, epnum); |
1872 | musb_writew(epio, MUSB_RXCSR, |
1873 | MUSB_RXCSR_H_WZC_BITS | rx_csr); |
1874 | } |
1875 | |
1876 | if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { |
1877 | xfer_len = dma->actual_len; |
1878 | |
1879 | val &= ~(MUSB_RXCSR_DMAENAB |
1880 | | MUSB_RXCSR_H_AUTOREQ |
1881 | | MUSB_RXCSR_AUTOCLEAR |
1882 | | MUSB_RXCSR_RXPKTRDY); |
1883 | musb_writew(hw_ep->regs, MUSB_RXCSR, val); |
1884 | |
1885 | if (musb_dma_inventra(musb) || musb_dma_ux500(musb) || |
1886 | musb_dma_cppi41(musb)) { |
			done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb,
							   xfer_len);
			musb_dbg(hw_ep->musb,
				"ep %d dma %s, rxcsr %04x, rxcount %d",
				epnum, done ? "off" : "reset",
1891 | musb_readw(epio, MUSB_RXCSR), |
1892 | musb_readw(epio, MUSB_RXCOUNT)); |
1893 | } else { |
1894 | done = true; |
1895 | } |
1896 | |
1897 | } else if (urb->status == -EINPROGRESS) { |
1898 | /* if no errors, be sure a packet is ready for unloading */ |
1899 | if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { |
1900 | status = -EPROTO; |
1901 | ERR("Rx interrupt with no errors or packet!\n" ); |
1902 | |
1903 | /* FIXME this is another "SHOULD NEVER HAPPEN" */ |
1904 | |
1905 | /* SCRUB (RX) */ |
1906 | /* do the proper sequence to abort the transfer */ |
1907 | musb_ep_select(mbase, epnum); |
1908 | val &= ~MUSB_RXCSR_H_REQPKT; |
1909 | musb_writew(epio, MUSB_RXCSR, val); |
1910 | goto finish; |
1911 | } |
1912 | |
1913 | /* we are expecting IN packets */ |
1914 | if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) || |
1915 | musb_dma_cppi41(musb)) && dma) { |
			musb_dbg(hw_ep->musb,
				"RX%d count %d, buffer 0x%llx len %d/%d",
1918 | epnum, musb_readw(epio, MUSB_RXCOUNT), |
1919 | (unsigned long long) urb->transfer_dma |
1920 | + urb->actual_length, |
1921 | qh->offset, |
1922 | urb->transfer_buffer_length); |
1923 | |
			if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
							   xfer_len, iso_err))
1926 | goto finish; |
1927 | else |
1928 | dev_err(musb->controller, "error: rx_dma failed\n" ); |
1929 | } |
1930 | |
1931 | if (!dma) { |
1932 | unsigned int received_len; |
1933 | |
1934 | /* Unmap the buffer so that CPU can use it */ |
1935 | usb_hcd_unmap_urb_for_dma(musb->hcd, urb); |
1936 | |
1937 | /* |
1938 | * We need to map sg if the transfer_buffer is |
1939 | * NULL. |
1940 | */ |
1941 | if (!urb->transfer_buffer) { |
1942 | qh->use_sg = true; |
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
					       sg_flags);
1945 | } |
1946 | |
1947 | if (qh->use_sg) { |
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller, "error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
1951 | status = -EINVAL; |
1952 | done = true; |
1953 | goto finish; |
1954 | } |
1955 | urb->transfer_buffer = qh->sg_miter.addr; |
1956 | received_len = urb->actual_length; |
1957 | qh->offset = 0x0; |
1958 | done = musb_host_packet_rx(musb, urb, epnum, |
1959 | iso_err); |
1960 | /* Calculate the number of bytes received */ |
1961 | received_len = urb->actual_length - |
1962 | received_len; |
1963 | qh->sg_miter.consumed = received_len; |
				sg_miter_stop(&qh->sg_miter);
1965 | } else { |
1966 | done = musb_host_packet_rx(musb, urb, |
1967 | epnum, iso_err); |
1968 | } |
1969 | musb_dbg(musb, fmt: "read %spacket" , done ? "last " : "" ); |
1970 | } |
1971 | } |
1972 | |
1973 | finish: |
1974 | urb->actual_length += xfer_len; |
1975 | qh->offset += xfer_len; |
1976 | if (done) { |
1977 | if (qh->use_sg) { |
1978 | qh->use_sg = false; |
1979 | urb->transfer_buffer = NULL; |
1980 | } |
1981 | |
1982 | if (urb->status == -EINPROGRESS) |
1983 | urb->status = status; |
1984 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); |
1985 | } |
1986 | } |
1987 | |
1988 | /* schedule nodes correspond to peripheral endpoints, like an OHCI QH. |
1989 | * the software schedule associates multiple such nodes with a given |
1990 | * host side hardware endpoint + direction; scheduling may activate |
1991 | * that hardware endpoint. |
1992 | */ |
1993 | static int musb_schedule( |
1994 | struct musb *musb, |
1995 | struct musb_qh *qh, |
1996 | int is_in) |
1997 | { |
1998 | int idle = 0; |
1999 | int best_diff; |
2000 | int best_end, epnum; |
2001 | struct musb_hw_ep *hw_ep = NULL; |
2002 | struct list_head *head = NULL; |
2003 | u8 toggle; |
2004 | u8 txtype; |
2005 | struct urb *urb = next_urb(qh); |
2006 | |
2007 | /* use fixed hardware for control and bulk */ |
2008 | if (qh->type == USB_ENDPOINT_XFER_CONTROL) { |
2009 | head = &musb->control; |
2010 | hw_ep = musb->control_ep; |
2011 | goto success; |
2012 | } |
2013 | |
2014 | /* else, periodic transfers get muxed to other endpoints */ |
2015 | |
2016 | /* |
2017 | * We know this qh hasn't been scheduled, so all we need to do |
2018 | * is choose which hardware endpoint to put it on ... |
2019 | * |
2020 | * REVISIT what we really want here is a regular schedule tree |
2021 | * like e.g. OHCI uses. |
2022 | */ |
2023 | best_diff = 4096; |
2024 | best_end = -1; |
2025 | |
2026 | for (epnum = 1, hw_ep = musb->endpoints + 1; |
2027 | epnum < musb->nr_endpoints; |
2028 | epnum++, hw_ep++) { |
2029 | int diff; |
2030 | |
		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
2032 | continue; |
2033 | |
2034 | if (hw_ep == musb->bulk_ep) |
2035 | continue; |
2036 | |
2037 | if (is_in) |
2038 | diff = hw_ep->max_packet_sz_rx; |
2039 | else |
2040 | diff = hw_ep->max_packet_sz_tx; |
2041 | diff -= (qh->maxpacket * qh->hb_mult); |
2042 | |
2043 | if (diff >= 0 && best_diff > diff) { |
2044 | |
2045 | /* |
2046 | * Mentor controller has a bug in that if we schedule |
2047 | * a BULK Tx transfer on an endpoint that had earlier |
2048 | * handled ISOC then the BULK transfer has to start on |
2049 | * a zero toggle. If the BULK transfer starts on a 1 |
2050 | * toggle then this transfer will fail as the mentor |
2051 | * controller starts the Bulk transfer on a 0 toggle |
2052 | * irrespective of the programming of the toggle bits |
2053 | * in the TXCSR register. Check for this condition |
2054 | * while allocating the EP for a Tx Bulk transfer. If |
2055 | * so skip this EP. |
2056 | */ |
2057 | hw_ep = musb->endpoints + epnum; |
2058 | toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in); |
2059 | txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE) |
2060 | >> 4) & 0x3; |
2061 | if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) && |
2062 | toggle && (txtype == USB_ENDPOINT_XFER_ISOC)) |
2063 | continue; |
2064 | |
2065 | best_diff = diff; |
2066 | best_end = epnum; |
2067 | } |
2068 | } |
2069 | /* use bulk reserved ep1 if no other ep is free */ |
2070 | if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { |
2071 | hw_ep = musb->bulk_ep; |
2072 | if (is_in) |
2073 | head = &musb->in_bulk; |
2074 | else |
2075 | head = &musb->out_bulk; |
2076 | |
		/* Enable the bulk RX/TX NAK timeout scheme when bulk requests
		 * are multiplexed. This scheme does not work in the high
		 * speed to full speed scenario, as NAK interrupts do not
		 * arrive from a full speed device connected behind a high
		 * speed hub. The NAK timeout interval is 8 (128 uframes or
		 * 16 ms) for HS and 4 (8 frames or 8 ms) for FS devices.
		 */
2084 | if (qh->dev) |
2085 | qh->intv_reg = |
2086 | (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; |
2087 | goto success; |
2088 | } else if (best_end < 0) { |
2089 | dev_err(musb->controller, |
2090 | "%s hwep alloc failed for %dx%d\n" , |
2091 | musb_ep_xfertype_string(qh->type), |
2092 | qh->hb_mult, qh->maxpacket); |
2093 | return -ENOSPC; |
2094 | } |
2095 | |
2096 | idle = 1; |
2097 | qh->mux = 0; |
2098 | hw_ep = musb->endpoints + best_end; |
2099 | musb_dbg(musb, fmt: "qh %p periodic slot %d" , qh, best_end); |
2100 | success: |
2101 | if (head) { |
2102 | idle = list_empty(head); |
		list_add_tail(&qh->ring, head);
2104 | qh->mux = 1; |
2105 | } |
2106 | qh->hw_ep = hw_ep; |
2107 | qh->hep->hcpriv = qh; |
2108 | if (idle) |
2109 | musb_start_urb(musb, is_in, qh); |
2110 | return 0; |
2111 | } |
2112 | |
2113 | static int musb_urb_enqueue( |
2114 | struct usb_hcd *hcd, |
2115 | struct urb *urb, |
2116 | gfp_t mem_flags) |
2117 | { |
2118 | unsigned long flags; |
2119 | struct musb *musb = hcd_to_musb(hcd); |
2120 | struct usb_host_endpoint *hep = urb->ep; |
2121 | struct musb_qh *qh; |
2122 | struct usb_endpoint_descriptor *epd = &hep->desc; |
2123 | int ret; |
2124 | unsigned type_reg; |
2125 | unsigned interval; |
2126 | |
2127 | /* host role must be active */ |
2128 | if (!is_host_active(musb) || !musb->is_active) |
2129 | return -ENODEV; |
2130 | |
2131 | trace_musb_urb_enq(musb, urb); |
2132 | |
2133 | spin_lock_irqsave(&musb->lock, flags); |
2134 | ret = usb_hcd_link_urb_to_ep(hcd, urb); |
2135 | qh = ret ? NULL : hep->hcpriv; |
2136 | if (qh) |
2137 | urb->hcpriv = qh; |
	spin_unlock_irqrestore(&musb->lock, flags);
2139 | |
2140 | /* DMA mapping was already done, if needed, and this urb is on |
2141 | * hep->urb_list now ... so we're done, unless hep wasn't yet |
2142 | * scheduled onto a live qh. |
2143 | * |
2144 | * REVISIT best to keep hep->hcpriv valid until the endpoint gets |
2145 | * disabled, testing for empty qh->ring and avoiding qh setup costs |
2146 | * except for the first urb queued after a config change. |
2147 | */ |
2148 | if (qh || ret) |
2149 | return ret; |
2150 | |
2151 | /* Allocate and initialize qh, minimizing the work done each time |
2152 | * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. |
2153 | * |
2154 | * REVISIT consider a dedicated qh kmem_cache, so it's harder |
2155 | * for bugs in other kernel code to break this driver... |
2156 | */ |
	qh = kzalloc(sizeof *qh, mem_flags);
2158 | if (!qh) { |
2159 | spin_lock_irqsave(&musb->lock, flags); |
2160 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
		spin_unlock_irqrestore(&musb->lock, flags);
2162 | return -ENOMEM; |
2163 | } |
2164 | |
2165 | qh->hep = hep; |
2166 | qh->dev = urb->dev; |
	INIT_LIST_HEAD(&qh->ring);
2168 | qh->is_ready = 1; |
2169 | |
2170 | qh->maxpacket = usb_endpoint_maxp(epd); |
2171 | qh->type = usb_endpoint_type(epd); |
2172 | |
2173 | /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier. |
2174 | * Some musb cores don't support high bandwidth ISO transfers; and |
2175 | * we don't (yet!) support high bandwidth interrupt transfers. |
2176 | */ |
2177 | qh->hb_mult = usb_endpoint_maxp_mult(epd); |
2178 | if (qh->hb_mult > 1) { |
2179 | int ok = (qh->type == USB_ENDPOINT_XFER_ISOC); |
2180 | |
2181 | if (ok) |
2182 | ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx) |
2183 | || (usb_pipeout(urb->pipe) && musb->hb_iso_tx); |
2184 | if (!ok) { |
2185 | dev_err(musb->controller, |
2186 | "high bandwidth %s (%dx%d) not supported\n" , |
2187 | musb_ep_xfertype_string(qh->type), |
2188 | qh->hb_mult, qh->maxpacket & 0x7ff); |
2189 | ret = -EMSGSIZE; |
2190 | goto done; |
2191 | } |
2192 | qh->maxpacket &= 0x7ff; |
2193 | } |
2194 | |
2195 | qh->epnum = usb_endpoint_num(epd); |
2196 | |
2197 | /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ |
2198 | qh->addr_reg = (u8) usb_pipedevice(urb->pipe); |
2199 | |
2200 | /* precompute rxtype/txtype/type0 register */ |
2201 | type_reg = (qh->type << 4) | qh->epnum; |
2202 | switch (urb->dev->speed) { |
2203 | case USB_SPEED_LOW: |
2204 | type_reg |= 0xc0; |
2205 | break; |
2206 | case USB_SPEED_FULL: |
2207 | type_reg |= 0x80; |
2208 | break; |
2209 | default: |
2210 | type_reg |= 0x40; |
2211 | } |
2212 | qh->type_reg = type_reg; |
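
	/*
	 * For example (hypothetical endpoint): a full speed interrupt
	 * endpoint 1 yields type_reg = (3 << 4) | 1 | 0x80 = 0xb1.
	 */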
2213 | |
2214 | /* Precompute RXINTERVAL/TXINTERVAL register */ |
2215 | switch (qh->type) { |
2216 | case USB_ENDPOINT_XFER_INT: |
2217 | /* |
2218 | * Full/low speeds use the linear encoding, |
2219 | * high speed uses the logarithmic encoding. |
2220 | */ |
2221 | if (urb->dev->speed <= USB_SPEED_FULL) { |
2222 | interval = max_t(u8, epd->bInterval, 1); |
2223 | break; |
2224 | } |
2225 | fallthrough; |
2226 | case USB_ENDPOINT_XFER_ISOC: |
2227 | /* ISO always uses logarithmic encoding */ |
2228 | interval = min_t(u8, epd->bInterval, 16); |
2229 | break; |
2230 | default: |
2231 | /* REVISIT we actually want to use NAK limits, hinting to the |
2232 | * transfer scheduling logic to try some other qh, e.g. try |
2233 | * for 2 msec first: |
2234 | * |
2235 | * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; |
2236 | * |
2237 | * The downside of disabling this is that transfer scheduling |
2238 | * gets VERY unfair for nonperiodic transfers; a misbehaving |
2239 | * peripheral could make that hurt. That's perfectly normal |
2240 | * for reads from network or serial adapters ... so we have |
2241 | * partial NAKlimit support for bulk RX. |
2242 | * |
2243 | * The upside of disabling it is simpler transfer scheduling. |
2244 | */ |
2245 | interval = 0; |
2246 | } |
2247 | qh->intv_reg = interval; |
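
	/*
	 * For example (hypothetical endpoint): a high speed interrupt
	 * endpoint with bInterval 4 uses the logarithmic encoding, giving
	 * a polling period of 2^(4-1) = 8 microframes, i.e. 1 msec.
	 */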
2248 | |
2249 | /* precompute addressing for external hub/tt ports */ |
2250 | if (musb->is_multipoint) { |
2251 | struct usb_device *parent = urb->dev->parent; |
2252 | |
2253 | if (parent != hcd->self.root_hub) { |
2254 | qh->h_addr_reg = (u8) parent->devnum; |
2255 | |
2256 | /* set up tt info if needed */ |
2257 | if (urb->dev->tt) { |
2258 | qh->h_port_reg = (u8) urb->dev->ttport; |
2259 | if (urb->dev->tt->hub) |
2260 | qh->h_addr_reg = |
2261 | (u8) urb->dev->tt->hub->devnum; |
2262 | if (urb->dev->tt->multi) |
2263 | qh->h_addr_reg |= 0x80; |
2264 | } |
2265 | } |
2266 | } |
2267 | |
2268 | /* invariant: hep->hcpriv is null OR the qh that's already scheduled. |
2269 | * until we get real dma queues (with an entry for each urb/buffer), |
2270 | * we only have work to do in the former case. |
2271 | */ |
2272 | spin_lock_irqsave(&musb->lock, flags); |
2273 | if (hep->hcpriv || !next_urb(qh)) { |
2274 | /* some concurrent activity submitted another urb to hep... |
2275 | * odd, rare, error prone, but legal. |
2276 | */ |
		kfree(qh);
2278 | qh = NULL; |
2279 | ret = 0; |
2280 | } else |
2281 | ret = musb_schedule(musb, qh, |
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2283 | |
2284 | if (ret == 0) { |
2285 | urb->hcpriv = qh; |
2286 | /* FIXME set urb->start_frame for iso/intr, it's tested in |
2287 | * musb_start_urb(), but otherwise only konicawc cares ... |
2288 | */ |
2289 | } |
	spin_unlock_irqrestore(&musb->lock, flags);
2291 | |
2292 | done: |
2293 | if (ret != 0) { |
2294 | spin_lock_irqsave(&musb->lock, flags); |
2295 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
2298 | } |
2299 | return ret; |
2300 | } |
2301 | |
2302 | |
2303 | /* |
2304 | * abort a transfer that's at the head of a hardware queue. |
2305 | * called with controller locked, irqs blocked |
2306 | * that hardware queue advances to the next transfer, unless prevented |
2307 | */ |
2308 | static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) |
2309 | { |
2310 | struct musb_hw_ep *ep = qh->hw_ep; |
2311 | struct musb *musb = ep->musb; |
2312 | void __iomem *epio = ep->regs; |
2313 | unsigned hw_end = ep->epnum; |
2314 | void __iomem *regs = ep->musb->mregs; |
2315 | int is_in = usb_pipein(urb->pipe); |
2316 | int status = 0; |
2317 | u16 csr; |
2318 | struct dma_channel *dma = NULL; |
2319 | |
2320 | musb_ep_select(regs, hw_end); |
2321 | |
2322 | if (is_dma_capable()) { |
2323 | dma = is_in ? ep->rx_channel : ep->tx_channel; |
2324 | if (dma) { |
2325 | status = ep->musb->dma_controller->channel_abort(dma); |
2326 | musb_dbg(musb, fmt: "abort %cX%d DMA for urb %p --> %d" , |
2327 | is_in ? 'R' : 'T', ep->epnum, |
2328 | urb, status); |
2329 | urb->actual_length += dma->actual_len; |
2330 | } |
2331 | } |
2332 | |
2333 | /* turn off DMA requests, discard state, stop polling ... */ |
2334 | if (ep->epnum && is_in) { |
2335 | /* giveback saves bulk toggle */ |
		csr = musb_h_flush_rxfifo(ep, 0);
2337 | |
2338 | /* clear the endpoint's irq status here to avoid bogus irqs */ |
2339 | if (is_dma_capable() && dma) |
			musb_platform_clear_ep_rxintr(musb, ep->epnum);
2341 | } else if (ep->epnum) { |
2342 | musb_h_tx_flush_fifo(ep); |
2343 | csr = musb_readw(epio, MUSB_TXCSR); |
2344 | csr &= ~(MUSB_TXCSR_AUTOSET |
2345 | | MUSB_TXCSR_DMAENAB |
2346 | | MUSB_TXCSR_H_RXSTALL |
2347 | | MUSB_TXCSR_H_NAKTIMEOUT |
2348 | | MUSB_TXCSR_H_ERROR |
2349 | | MUSB_TXCSR_TXPKTRDY); |
2350 | musb_writew(epio, MUSB_TXCSR, csr); |
2351 | /* REVISIT may need to clear FLUSHFIFO ... */ |
2352 | musb_writew(epio, MUSB_TXCSR, csr); |
2353 | /* flush cpu writebuffer */ |
2354 | csr = musb_readw(epio, MUSB_TXCSR); |
2355 | } else { |
2356 | musb_h_ep0_flush_fifo(ep); |
2357 | } |
2358 | if (status == 0) |
		musb_advance_schedule(ep->musb, urb, ep, is_in);
2360 | return status; |
2361 | } |
2362 | |
2363 | static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) |
2364 | { |
2365 | struct musb *musb = hcd_to_musb(hcd); |
2366 | struct musb_qh *qh; |
2367 | unsigned long flags; |
2368 | int is_in = usb_pipein(urb->pipe); |
2369 | int ret; |
2370 | |
2371 | trace_musb_urb_deq(musb, urb); |
2372 | |
2373 | spin_lock_irqsave(&musb->lock, flags); |
2374 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); |
2375 | if (ret) |
2376 | goto done; |
2377 | |
2378 | qh = urb->hcpriv; |
2379 | if (!qh) |
2380 | goto done; |
2381 | |
2382 | /* |
2383 | * Any URB not actively programmed into endpoint hardware can be |
2384 | * immediately given back; that's any URB not at the head of an |
2385 | * endpoint queue, unless someday we get real DMA queues. And even |
2386 | * if it's at the head, it might not be known to the hardware... |
2387 | * |
2388 | * Otherwise abort current transfer, pending DMA, etc.; urb->status |
2389 | * has already been updated. This is a synchronous abort; it'd be |
2390 | * OK to hold off until after some IRQ, though. |
2391 | * |
2392 | * NOTE: qh is invalid unless !list_empty(&hep->urb_list) |
2393 | */ |
2394 | if (!qh->is_ready |
2395 | || urb->urb_list.prev != &qh->hep->urb_list |
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2397 | int ready = qh->is_ready; |
2398 | |
2399 | qh->is_ready = 0; |
		musb_giveback(musb, urb, 0);
2401 | qh->is_ready = ready; |
2402 | |
2403 | /* If nothing else (usually musb_giveback) is using it |
2404 | * and its URB list has emptied, recycle this qh. |
2405 | */ |
		if (ready && list_empty(&qh->hep->urb_list)) {
			musb_ep_set_qh(qh->hw_ep, is_in, NULL);
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
2411 | } |
2412 | } else |
2413 | ret = musb_cleanup_urb(urb, qh); |
2414 | done: |
	spin_unlock_irqrestore(&musb->lock, flags);
2416 | return ret; |
2417 | } |
2418 | |
2419 | /* disable an endpoint */ |
2420 | static void |
2421 | musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) |
2422 | { |
2423 | u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN; |
2424 | unsigned long flags; |
2425 | struct musb *musb = hcd_to_musb(hcd); |
2426 | struct musb_qh *qh; |
2427 | struct urb *urb; |
2428 | |
2429 | spin_lock_irqsave(&musb->lock, flags); |
2430 | |
2431 | qh = hep->hcpriv; |
2432 | if (qh == NULL) |
2433 | goto exit; |
2434 | |
2435 | /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ |
2436 | |
2437 | /* Kick the first URB off the hardware, if needed */ |
2438 | qh->is_ready = 0; |
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2440 | urb = next_urb(qh); |
2441 | |
2442 | /* make software (then hardware) stop ASAP */ |
2443 | if (!urb->unlinked) |
2444 | urb->status = -ESHUTDOWN; |
2445 | |
2446 | /* cleanup */ |
2447 | musb_cleanup_urb(urb, qh); |
2448 | |
2449 | /* Then nuke all the others ... and advance the |
2450 | * queue on hw_ep (e.g. bulk ring) when we're done. |
2451 | */ |
		while (!list_empty(&hep->urb_list)) {
2453 | urb = next_urb(qh); |
2454 | urb->status = -ESHUTDOWN; |
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2456 | } |
2457 | } else { |
2458 | /* Just empty the queue; the hardware is busy with |
2459 | * other transfers, and since !qh->is_ready nothing |
2460 | * will activate any of these as it advances. |
2461 | */ |
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2464 | |
2465 | hep->hcpriv = NULL; |
		list_del(&qh->ring);
		kfree(qh);
2468 | } |
2469 | exit: |
	spin_unlock_irqrestore(&musb->lock, flags);
2471 | } |
2472 | |
2473 | static int musb_h_get_frame_number(struct usb_hcd *hcd) |
2474 | { |
2475 | struct musb *musb = hcd_to_musb(hcd); |
2476 | |
2477 | return musb_readw(musb->mregs, MUSB_FRAME); |
2478 | } |
2479 | |
2480 | static int musb_h_start(struct usb_hcd *hcd) |
2481 | { |
2482 | struct musb *musb = hcd_to_musb(hcd); |
2483 | |
2484 | /* NOTE: musb_start() is called when the hub driver turns |
2485 | * on port power, or when (OTG) peripheral starts. |
2486 | */ |
2487 | hcd->state = HC_STATE_RUNNING; |
2488 | musb->port1_status = 0; |
2489 | return 0; |
2490 | } |
2491 | |
2492 | static void musb_h_stop(struct usb_hcd *hcd) |
2493 | { |
	musb_stop(hcd_to_musb(hcd));
2495 | hcd->state = HC_STATE_HALT; |
2496 | } |
2497 | |
2498 | static int musb_bus_suspend(struct usb_hcd *hcd) |
2499 | { |
2500 | struct musb *musb = hcd_to_musb(hcd); |
2501 | u8 devctl; |
2502 | int ret; |
2503 | |
	ret = musb_port_suspend(musb, true);
2505 | if (ret) |
2506 | return ret; |
2507 | |
2508 | if (!is_host_active(musb)) |
2509 | return 0; |
2510 | |
2511 | switch (musb_get_state(musb)) { |
2512 | case OTG_STATE_A_SUSPEND: |
2513 | return 0; |
2514 | case OTG_STATE_A_WAIT_VRISE: |
2515 | /* ID could be grounded even if there's no device |
2516 | * on the other end of the cable. NOTE that the |
2517 | * A_WAIT_VRISE timers are messy with MUSB... |
2518 | */ |
2519 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); |
2520 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) |
			musb_set_state(musb, OTG_STATE_A_WAIT_BCON);
2522 | break; |
2523 | default: |
2524 | break; |
2525 | } |
2526 | |
2527 | if (musb->is_active) { |
2528 | WARNING("trying to suspend as %s while active\n" , |
2529 | musb_otg_state_string(musb)); |
2530 | return -EBUSY; |
2531 | } else |
2532 | return 0; |
2533 | } |
2534 | |
2535 | static int musb_bus_resume(struct usb_hcd *hcd) |
2536 | { |
2537 | struct musb *musb = hcd_to_musb(hcd); |
2538 | |
2539 | if (musb->config && |
2540 | musb->config->host_port_deassert_reset_at_resume) |
		musb_port_reset(musb, false);
2542 | |
2543 | return 0; |
2544 | } |
2545 | |
2546 | #ifndef CONFIG_MUSB_PIO_ONLY |
2547 | |
2548 | #define MUSB_USB_DMA_ALIGN 4 |
2549 | |
2550 | struct musb_temp_buffer { |
2551 | void *kmalloc_ptr; |
2552 | void *old_xfer_buffer; |
2553 | u8 data[]; |
2554 | }; |
2555 | |
2556 | static void musb_free_temp_buffer(struct urb *urb) |
2557 | { |
2558 | enum dma_data_direction dir; |
2559 | struct musb_temp_buffer *temp; |
2560 | size_t length; |
2561 | |
2562 | if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) |
2563 | return; |
2564 | |
2565 | dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
2566 | |
2567 | temp = container_of(urb->transfer_buffer, struct musb_temp_buffer, |
2568 | data); |
2569 | |
2570 | if (dir == DMA_FROM_DEVICE) { |
2571 | if (usb_pipeisoc(urb->pipe)) |
2572 | length = urb->transfer_buffer_length; |
2573 | else |
2574 | length = urb->actual_length; |
2575 | |
2576 | memcpy(temp->old_xfer_buffer, temp->data, length); |
2577 | } |
2578 | urb->transfer_buffer = temp->old_xfer_buffer; |
2579 | kfree(temp->kmalloc_ptr); |
2580 | |
2581 | urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; |
2582 | } |
2583 | |
2584 | static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags) |
2585 | { |
2586 | enum dma_data_direction dir; |
2587 | struct musb_temp_buffer *temp; |
2588 | void *kmalloc_ptr; |
2589 | size_t kmalloc_size; |
2590 | |
2591 | if (urb->num_sgs || urb->sg || |
2592 | urb->transfer_buffer_length == 0 || |
2593 | !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1))) |
2594 | return 0; |
2595 | |
2596 | dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
2597 | |
2598 | /* Allocate a buffer with enough padding for alignment */ |
2599 | kmalloc_size = urb->transfer_buffer_length + |
2600 | sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1; |
2601 | |
2602 | kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); |
2603 | if (!kmalloc_ptr) |
2604 | return -ENOMEM; |
2605 | |
2606 | /* Position our struct temp_buffer such that data is aligned */ |
2607 | temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN); |
2608 | |
2609 | |
2610 | temp->kmalloc_ptr = kmalloc_ptr; |
2611 | temp->old_xfer_buffer = urb->transfer_buffer; |
2612 | if (dir == DMA_TO_DEVICE) |
2613 | memcpy(temp->data, urb->transfer_buffer, |
2614 | urb->transfer_buffer_length); |
2615 | urb->transfer_buffer = temp->data; |
2616 | |
2617 | urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; |
2618 | |
2619 | return 0; |
2620 | } |
2621 | |
2622 | static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, |
2623 | gfp_t mem_flags) |
2624 | { |
2625 | struct musb *musb = hcd_to_musb(hcd); |
2626 | int ret; |
2627 | |
2628 | /* |
2629 | * The DMA engine in RTL1.8 and above cannot handle |
2630 | * DMA addresses that are not aligned to a 4 byte boundary. |
2631 | * For such engine implemented (un)map_urb_for_dma hooks. |
2632 | * Do not use these hooks for RTL<1.8 |
2633 | */ |
2634 | if (musb->hwvers < MUSB_HWVERS_1800) |
2635 | return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); |
2636 | |
2637 | ret = musb_alloc_temp_buffer(urb, mem_flags); |
2638 | if (ret) |
2639 | return ret; |
2640 | |
2641 | ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); |
2642 | if (ret) |
2643 | musb_free_temp_buffer(urb); |
2644 | |
2645 | return ret; |
2646 | } |
2647 | |
2648 | static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) |
2649 | { |
2650 | struct musb *musb = hcd_to_musb(hcd); |
2651 | |
2652 | usb_hcd_unmap_urb_for_dma(hcd, urb); |
2653 | |
2654 | /* Do not use this hook for RTL<1.8 (see description above) */ |
2655 | if (musb->hwvers < MUSB_HWVERS_1800) |
2656 | return; |
2657 | |
2658 | musb_free_temp_buffer(urb); |
2659 | } |
2660 | #endif /* !CONFIG_MUSB_PIO_ONLY */ |
2661 | |
2662 | static const struct hc_driver musb_hc_driver = { |
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
2665 | .hcd_priv_size = sizeof(struct musb *), |
2666 | .flags = HCD_USB2 | HCD_DMA | HCD_MEMORY, |
2667 | |
2668 | /* not using irq handler or reset hooks from usbcore, since |
2669 | * those must be shared with peripheral code for OTG configs |
2670 | */ |
2671 | |
2672 | .start = musb_h_start, |
2673 | .stop = musb_h_stop, |
2674 | |
2675 | .get_frame_number = musb_h_get_frame_number, |
2676 | |
2677 | .urb_enqueue = musb_urb_enqueue, |
2678 | .urb_dequeue = musb_urb_dequeue, |
2679 | .endpoint_disable = musb_h_disable, |
2680 | |
2681 | #ifndef CONFIG_MUSB_PIO_ONLY |
2682 | .map_urb_for_dma = musb_map_urb_for_dma, |
2683 | .unmap_urb_for_dma = musb_unmap_urb_for_dma, |
2684 | #endif |
2685 | |
2686 | .hub_status_data = musb_hub_status_data, |
2687 | .hub_control = musb_hub_control, |
2688 | .bus_suspend = musb_bus_suspend, |
2689 | .bus_resume = musb_bus_resume, |
2690 | /* .start_port_reset = NULL, */ |
2691 | /* .hub_irq_enable = NULL, */ |
2692 | }; |
2693 | |
2694 | int musb_host_alloc(struct musb *musb) |
2695 | { |
2696 | struct device *dev = musb->controller; |
2697 | |
2698 | /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ |
	musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2700 | if (!musb->hcd) |
2701 | return -EINVAL; |
2702 | |
2703 | *musb->hcd->hcd_priv = (unsigned long) musb; |
2704 | musb->hcd->self.uses_pio_for_control = 1; |
2705 | musb->hcd->uses_new_polling = 1; |
2706 | musb->hcd->has_tt = 1; |
2707 | |
2708 | return 0; |
2709 | } |
2710 | |
2711 | void musb_host_cleanup(struct musb *musb) |
2712 | { |
2713 | if (musb->port_mode == MUSB_PERIPHERAL) |
2714 | return; |
	usb_remove_hcd(musb->hcd);
2716 | } |
2717 | |
2718 | void musb_host_free(struct musb *musb) |
2719 | { |
	usb_put_hcd(musb->hcd);
2721 | } |
2722 | |
2723 | int musb_host_setup(struct musb *musb, int power_budget) |
2724 | { |
2725 | int ret; |
2726 | struct usb_hcd *hcd = musb->hcd; |
2727 | |
2728 | if (musb->port_mode == MUSB_HOST) { |
2729 | MUSB_HST_MODE(musb); |
		musb_set_state(musb, OTG_STATE_A_IDLE);
2731 | } |
2732 | |
2733 | if (musb->xceiv) { |
		otg_set_host(musb->xceiv->otg, &hcd->self);
2735 | musb->xceiv->otg->host = &hcd->self; |
2736 | } else { |
2737 | phy_set_mode(musb->phy, PHY_MODE_USB_HOST); |
2738 | } |
2739 | |
2740 | /* don't support otg protocols */ |
2741 | hcd->self.otg_port = 0; |
2742 | hcd->power_budget = 2 * (power_budget ? : 250); |
2743 | hcd->skip_phy_initialization = 1; |
2744 | |
	ret = usb_add_hcd(hcd, 0, 0);
2746 | if (ret < 0) |
2747 | return ret; |
2748 | |
	device_wakeup_enable(hcd->self.controller);
2750 | return 0; |
2751 | } |
2752 | |
2753 | void musb_host_resume_root_hub(struct musb *musb) |
2754 | { |
	usb_hcd_resume_root_hub(musb->hcd);
2756 | } |
2757 | |
2758 | void musb_host_poke_root_hub(struct musb *musb) |
2759 | { |
2760 | MUSB_HST_MODE(musb); |
2761 | if (musb->hcd->status_urb) |
		usb_hcd_poll_rh_status(musb->hcd);
2763 | else |
		usb_hcd_resume_root_hub(musb->hcd);
2765 | } |
2766 | |