// SPDX-License-Identifier: GPL-2.0
/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem.  The worst that can happen is that we set the IOC bit again,
 * generating a spurious interrupt.  We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases.  I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
}


/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
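 *
 * (While FSBR is on, the terminating skeleton QH links back to the first
 * full-speed QH, so instead of idling for the rest of the frame the
 * controller keeps re-walking the full-speed control/bulk queues and
 * reclaims the otherwise unused bandwidth.)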
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* The terminating skeleton QH always points back to the first
	 * FSBR QH.  Make the last async QH point to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 1;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* Remove the link from the last async QH to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 0;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = UHCI_PTR_TERM(uhci);
}

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
	if (urbp->fsbr) {
		uhci->fsbr_is_wanted = 1;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
		else if (uhci->fsbr_expiring) {
			uhci->fsbr_expiring = 0;
			del_timer(&uhci->fsbr_timer);
		}
	}
}

static void uhci_fsbr_timeout(struct timer_list *t)
{
	struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}


static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
		u32 status, u32 token, u32 buffer)
{
	td->status = cpu_to_hc32(uhci, status);
	td->token = cpu_to_hc32(uhci, token);
	td->buffer = cpu_to_hc32(uhci, buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
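	/* The frame list has UHCI_NUMFRAMES entries (1024, a power of
	 * two), so frame numbers simply wrap around it. */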
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
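		/* Finish writing the new TD before the controller can
		 * follow a link to it */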
		wmb();
		ltd->link = LINK_TO_TD(uhci, td);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = LINK_TO_TD(uhci, td);
		uhci->frame_cpu[framenum] = td;
	}
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next,
					struct uhci_td,
					fl_list);
			uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_zalloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM(uhci);
	qh->link = UHCI_PTR_TERM(uhci);

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->type = usb_endpoint_type(&hep->desc);
		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
			qh->dummy_td = uhci_alloc_td(uhci);
			if (!qh->dummy_td) {
				dma_pool_free(uhci->qh_pool, qh, dma_handle);
				return NULL;
			}
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;

		if (qh->type == USB_ENDPOINT_XFER_INT ||
				qh->type == USB_ENDPOINT_XFER_ISOC)
			qh->load = usb_calc_bus_time(udev->speed,
					usb_endpoint_dir_in(&hep->desc),
					qh->type == USB_ENDPOINT_XFER_ISOC,
					usb_endpoint_maxp(&hep->desc))
				/ 1000 + 1;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		if (qh->dummy_td)
			uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM(uhci))
		goto done;
	qh->element = UHCI_PTR_TERM(uhci);

	/* Control pipes don't have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(uhci, td));

done:
	return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh,
			int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM(uhci))
		toggle = 2;
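		/* (toggle values > 1 act as "match anything" in the
		 * loop below) */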

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(uhci, td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= cpu_to_hc32(uhci,
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}

/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

	/* Isochronous QHs aren't linked by the hardware */
}

/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = LINK_TO_QH(uhci, qh);
}

/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__hc32 link_to_new_qh;

	/* Find the predecessor QH for our new one and insert it in the list.
	 * The list of QHs is expected to be short, so linear search won't
	 * take too long. */
	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
		if (pqh->skel <= qh->skel)
			break;
	}
	list_add(&qh->node, &pqh->node);

	/* Link it into the schedule */
	qh->link = pqh->link;
	wmb();
	link_to_new_qh = LINK_TO_QH(uhci, qh);
	pqh->link = link_to_new_qh;

	/* If this is now the first FSBR QH, link the terminating skeleton
	 * QH to it. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_new_qh;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM(uhci)) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = LINK_TO_TD(uhci, td);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the correct spot in the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_del(&qh->node);

	if (qh->skel == SKEL_ISO)
		link_iso(uhci, qh);
	else if (qh->skel < SKEL_ASYNC)
		link_interrupt(uhci, qh);
	else
		link_async(uhci, qh);
}

/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();
}

/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__hc32 link_to_next_qh = qh->link;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = link_to_next_qh;

	/* If this was the old first FSBR QH, link the terminating skeleton
	 * QH to the next (new first FSBR) QH. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_next_qh;
	mb();
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	if (qh->skel == SKEL_ISO)
		;
	else if (qh->skel < SKEL_ASYNC)
		unlink_interrupt(uhci, qh);
	else
		unlink_async(uhci, qh);

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
	int highest_load = uhci->load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		highest_load = max_t(int, highest_load, uhci->load[phase]);
	return highest_load;
}

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int minimax_load;

	/* Find the optimal phase (unless it is already set) and get
	 * its load value. */
	if (qh->phase >= 0)
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
	else {
		int phase, load;
		int max_phase = min_t(int, MAX_PHASE, qh->period);

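		/* Minimax search: pick the phase whose most heavily
		 * loaded frame carries the lightest load. */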
		qh->phase = 0;
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
		for (phase = 1; phase < max_phase; ++phase) {
			load = uhci_highest_load(uhci, phase, qh->period);
			if (load < minimax_load) {
				minimax_load = load;
				qh->phase = phase;
			}
		}
	}

	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
	if (minimax_load + qh->load > 900) {
		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
				"period %d, phase %d, %d + %d us\n",
				qh->period, qh->phase, minimax_load, qh->load);
		return -ENOSPC;
	}
	return 0;
}

/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] += load;
		uhci->total_load += load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 1;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"reserve", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] -= load;
		uhci->total_load -= load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 0;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"release", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(uhci, td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(uhci, td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	return 0;
}

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = usb_endpoint_maxp(&qh->hep->desc);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__hc32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	int skel;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(uhci, td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 *
	 * 0-length transfers always get treated as "send".
	 */
	if (usb_pipeout(urb->pipe) || len == 0)
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last data packet */
			pktsze = len;
			status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(uhci, td);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(uhci, td, status,
				destination | uhci_explen(pktsze), data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(uhci, td);

	/* Change direction for the status transaction */
	destination ^= (USB_PID_IN ^ USB_PID_OUT);
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(uhci, td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
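	 *
	 * (The queue always ends with an inactive dummy TD: new TDs are
	 * chained behind it first, and setting the old dummy's ACTIVE bit
	 * then hands the whole chain to the controller atomically.)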
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(uhci, td);

	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		skel = SKEL_LS_CONTROL;
	else {
		skel = SKEL_FS_CONTROL;
		uhci_add_fsbr(uhci, urb);
	}
	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = skel;
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = usb_endpoint_maxp(&qh->hep->desc);
	int len = urb->transfer_buffer_length;
	int this_sg_len;
	dma_addr_t data;
	__hc32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;
	struct scatterlist *sg;
	int i;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	i = urb->num_mapped_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		data = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		data = urb->transfer_dma;
		this_sg_len = len;
	}
	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	for (;;) {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = LINK_TO_TD(uhci, td);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(uhci, td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		toggle ^= 1;
		data += pktsze;
		this_sg_len -= pktsze;
		len -= maxsze;
		if (this_sg_len <= 0) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			data = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}
	}

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0;
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(uhci, td);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(uhci, td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(uhci, td);

	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = SKEL_BULK;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}

static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */

	if (!qh->bandwidth_reserved) {
		int exponent;

		/* Figure out which power-of-two queue to use */
		for (exponent = 7; exponent >= 0; --exponent) {
			if ((1 << exponent) <= urb->interval)
				break;
		}
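		/* (e.g. urb->interval == 10 gives exponent 3, period 8) */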
		if (exponent < 0)
			return -EINVAL;

		/* If the slot is full, try a lower period */
		do {
			qh->period = 1 << exponent;
			qh->skel = SKEL_INDEX(exponent);

			/* For now, interrupt phase is fixed by the layout
			 * of the QH lists.
			 */
			qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
			ret = uhci_check_bandwidth(uhci, qh);
		} while (ret != 0 && --exponent >= 0);
		if (ret)
			return ret;
	} else if (qh->period > urb->interval)
		return -EINVAL;		/* Can't decrease the period */

	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0) {
		urb->interval = qh->period;
		if (!qh->bandwidth_reserved)
			uhci_reserve_bandwidth(uhci, qh);
	}
	return ret;
}

/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = LINK_TO_TD(uhci, td);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle =
			uhci_toggle(td_token(uhci, qh->post_td)) ^ 1;
		uhci_fixup_toggles(uhci, qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}

/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(uhci, td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(uhci, td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(&urb->dev->dev,
						"%s: failed with status %x\n",
						__func__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(uhci, urbp->qh, errbuf,
						ERRBUF_LEN - EXTRA_SPACE, 0);
					lprintk(errbuf);
				}
			}

		/* Did we receive a short packet? */
		} else if (len < uhci_expected_length(td_token(uhci, td))) {

			/* For control transfers, go to the status TD if
			 * this isn't already the last data TD */
			if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
				if (td->list.next != urbp->td_list.prev)
					ret = 1;
			}

			/* For bulk and interrupt, this may be an error */
			else if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;

			/* Fixup needed only if this isn't the URB's last TD */
			else if (&td->list != urbp->td_list.prev)
				ret = 1;
		}

		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM(uhci);
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i;
	unsigned frame, next;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Values must not be too big (could overflow below) */
	if (urb->interval >= UHCI_NUMFRAMES ||
			urb->number_of_packets >= UHCI_NUMFRAMES)
		return -EFBIG;

	uhci_get_current_frame_number(uhci);

	/* Check the period and figure out the starting frame number */
	if (!qh->bandwidth_reserved) {
		qh->period = urb->interval;
		qh->phase = -1;		/* Find the best phase */
		i = uhci_check_bandwidth(uhci, qh);
		if (i)
			return i;

		/* Allow a little time to allocate the TDs */
		next = uhci->frame_number + 10;
		frame = qh->phase;

		/* Round up to the first available slot */
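		/* (qh->period is a power of two, so ANDing with -qh->period
		 * rounds the offset up to a period multiple; e.g. frame = 3,
		 * next = 10, period = 8 gives frame = 3 + 8 = 11.) */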
		frame += (next - frame + qh->period - 1) & -qh->period;

	} else if (qh->period != urb->interval) {
		return -EINVAL;		/* Can't change the period */

	} else {
		next = uhci->frame_number + 1;

		/* Find the next unused frame */
		if (list_empty(&qh->queue)) {
			frame = qh->iso_frame;
		} else {
			struct urb *lurb;

			lurb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			frame = lurb->start_frame +
					lurb->number_of_packets *
					lurb->interval;
		}

		/* Fell behind? */
		if (!uhci_frame_before_eq(next, frame)) {

			/* USB_ISO_ASAP: Round up to the first available slot */
			if (urb->transfer_flags & URB_ISO_ASAP)
				frame += (next - frame + qh->period - 1) &
						-qh->period;

			/*
			 * Not ASAP: Use the next slot in the stream,
			 * no matter what.
			 */
			else if (!uhci_frame_before_eq(next,
					frame + (urb->number_of_packets - 1) *
						qh->period))
				dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
						urb, frame,
						(urb->number_of_packets - 1) *
							qh->period,
						next);
		}
	}

	/* Make sure we won't have to go too far into the future */
	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
			frame + urb->number_of_packets * urb->interval))
		return -EFBIG;
	urb->start_frame = frame;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(uhci, td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += qh->period;
	}

	if (list_empty(&qh->queue)) {
		qh->iso_packet_desc = &urb->iso_frame_desc[0];
		qh->iso_frame = urb->start_frame;
	}

	qh->skel = SKEL_ISO;
	if (!qh->bandwidth_reserved)
		uhci_reserve_bandwidth(uhci, qh);
	return 0;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int status;
		int actlength;

		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
			return -EINPROGRESS;

		uhci_remove_tds_from_frame(uhci, qh->iso_frame);

		ctrlstat = td_status(uhci, td);
		if (ctrlstat & TD_CTRL_ACTIVE) {
			status = -EXDEV;	/* TD was added too late? */
		} else {
			status = uhci_map_status(uhci_status_bits(ctrlstat),
					usb_pipeout(urb->pipe));
			actlength = uhci_actual_length(ctrlstat);

			urb->actual_length += actlength;
			qh->iso_packet_desc->actual_length = actlength;
			qh->iso_packet_desc->status = status;
		}
		if (status)
			urb->error_count++;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
		qh->iso_frame += qh->period;
		++qh->iso_packet_desc;
	}
	return 0;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto done_not_linked;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (urb->ep->hcpriv)
		qh = urb->ep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = uhci_submit_interrupt(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		urb->error_count = 0;
		ret = uhci_submit_isochronous(uhci, urb, qh);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_urbp_wants_fsbr(uhci, urbp);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
err_no_qh:
	uhci_free_urb_priv(uhci, urbp);
done:
	if (ret)
		usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct uhci_qh *qh;
	int rc;

	spin_lock_irqsave(&uhci->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	qh = ((struct urb_priv *) urb->hcpriv)->qh;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		uhci_unlink_isochronous_tds(uhci, urb);
		mb();

		/* If the URB has already started, update the QH unlink time */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return rc;
}

/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, int status)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* Subtract off the length of the SETUP packet from
		 * urb->actual_length.
		 */
		urb->actual_length -= min_t(u32, 8, urb->actual_length);
	}

	/* When giving back the first URB in an Isochronous queue,
	 * reinitialize the QH's iso-related members for the next URB. */
	else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
			urbp->node.prev == &qh->queue &&
			urbp->node.next != &qh->queue) {
		struct urb *nurb = list_entry(urbp->node.next,
				struct urb_priv, node)->urb;

		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
		qh->iso_frame = nurb->start_frame;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);
	usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);
		if (qh->bandwidth_reserved)
			uhci_release_bandwidth(uhci, qh);
	}
}

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		 uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
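
/* The test succeeds once at least one frame boundary has passed since
 * the unlink.  While the controller is stopped the frame number doesn't
 * advance, so the nonzero is_stopped value makes unlinks complete
 * immediately. */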

static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (urb->unlinked) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, status);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->unlinked) {

			/* Fix up the TD links and save the toggles for
			 * non-Isochronous queues.  For Isochronous queues,
			 * test for too-recent dequeues. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
			uhci_giveback_urb(uhci, qh, urb, 0);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(uhci, qh, 0);

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion before reactivating the QH. */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
		}

		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}

/*
 * Check for queues that have made some forward progress.
 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
 * has not advanced since last examined; 1 otherwise.
 *
 * Early Intel controllers have a bug which causes qh->element sometimes
 * not to advance when a TD completes successfully.  The queue remains
 * stuck on the inactive completed TD.  We detect such cases and advance
 * the element pointer by hand.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	int ret = 1;
	unsigned status;

	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		goto done;

	/* Treat an UNLINKING queue as though it hasn't advanced.
	 * This is okay because reactivation will treat it as though
	 * it has advanced, and if it is going to become IDLE then
	 * this doesn't matter anyway.  Furthermore it's possible
	 * for an UNLINKING queue not to have any URBs at all, or
	 * for its first URB not to have any TDs (if it was dequeued
	 * just as it completed).  So it's not easy in any case to
	 * test whether such queues have advanced. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(uhci, td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			goto done;
		}
		ret = uhci->is_stopped;
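		/* (0 while the controller is running; nonzero when it is
		 * stopped, so such queues still get scanned) */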
	}

	/* The queue hasn't advanced; check for timeout */
	if (qh->wait_expired)
		goto done;

	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* Detect the Intel bug and work around it */
		if (qh->post_td && qh_element(qh) ==
				LINK_TO_TD(uhci, qh->post_td)) {
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			ret = 1;
			goto done;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR, unlink it temporarily
		 * so that we can safely set the next TD to interrupt on
		 * completion.  That way we'll know as soon as the queue
		 * starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);

	} else {
		/* Unmoving but not-yet-expired queues keep FSBR alive */
		if (urbp)
			uhci_urbp_wants_fsbr(uhci, urbp);
	}

done:
	return ret;
}

/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;
	uhci->fsbr_is_wanted = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);
	uhci->cur_iso_frame = uhci->frame_number;

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh);
				if (qh->state == QH_STATE_ACTIVE) {
					uhci_urbp_wants_fsbr(uhci,
						list_entry(qh->queue.next,
							struct urb_priv, node));
				}
			}
		}
	}

	uhci->last_iso_frame = uhci->cur_iso_frame;
	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
			!uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 1;
		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
	}

	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}