// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer. If SW is the producer, it rings the doorbell for command
 *    and endpoint rings. If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer (see the sketch following this comment).
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer. HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for these.
 */
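
/*
 * Illustrative sketch (not part of the driver logic, names invented for
 * illustration): the consumer-side ownership test from Consumer rule 1 above,
 * written out for a generic consumer. "ring" and "trb" stand for any consumer
 * ring / candidate TRB pair; only the cycle-bit comparison is the point shown.
 *
 *	bool consumer_owns_trb(struct xhci_ring *ring, union xhci_trb *trb)
 *	{
 *		u32 cycle = le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE;
 *
 *		return cycle == ring->cycle_state;
 *	}
 */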

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"

static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
				union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
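
/*
 * Worked example (made-up numbers, for illustration only): with
 * sizeof(union xhci_trb) == 16, if seg->dma is 0x1000 and trb points at the
 * third TRB of the segment (segment_offset == 2), the function above returns
 * 0x1000 + 2 * 16 = 0x1020. A TRB outside the segment's TRB array yields 0,
 * which callers treat as "not in this segment".
 */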

static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			     struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->num_tds_done == urb_priv->num_tds;
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->num_tds_done++;
}

static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
	if (trb_is_link(trb)) {
		/* unchain chained link TRBs */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment. This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		     struct xhci_ring *ring,
		     struct xhci_segment **seg,
		     union xhci_trb **trb)
{
	if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 */
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	unsigned int link_trb_count = 0;

	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			goto out;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		goto out;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		if (last_trb_on_seg(ring->deq_seg, ring->dequeue))
			xhci_warn(xhci, "Missing link TRB at end of segment\n");
		else
			ring->dequeue++;
	}

	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;

		if (link_trb_count++ > ring->num_segs) {
			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
			break;
		}
	}
out:
	trace_xhci_inc_deq(ring);

	return;
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		    bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;
	unsigned int link_trb_count = 0;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;

	if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
		xhci_err(xhci, "Tried to move enqueue past ring segment\n");
		return;
	}

	next = ++(ring->enqueue);

	/* Update the dequeue pointer further if that was a link TRB */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;

		if (link_trb_count++ > ring->num_segs) {
			xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
			break;
		}
	}

	trace_xhci_inc_enq(ring);
}

/*
 * Return number of free normal TRBs from enqueue to dequeue pointer on ring.
 * Not counting an assumed link TRB at end of each TRBS_PER_SEGMENT sized segment.
 * Only for transfer and command rings where driver is the producer, not for
 * event rings.
 */
static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *enq_seg = ring->enq_seg;
	union xhci_trb *enq = ring->enqueue;
	union xhci_trb *last_on_seg;
	unsigned int free = 0;
	int i = 0;

	/* Ring might be empty even if enq != deq if enq is left on a link trb */
	if (trb_is_link(enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}

	/* Empty ring, common case, don't walk the segments */
	if (enq == ring->dequeue)
		return ring->num_segs * (TRBS_PER_SEGMENT - 1);

	do {
		if (ring->deq_seg == enq_seg && ring->dequeue >= enq)
			return free + (ring->dequeue - enq);
		last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1];
		free += last_on_seg - enq;
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	} while (i++ <= ring->num_segs);

	return free;
}
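
/*
 * Worked example (numbers invented for illustration): with TRBS_PER_SEGMENT
 * == 256 there are 255 usable TRBs per segment (the last one is assumed to be
 * a link TRB). On a two-segment ring with enqueue at index 10 and dequeue at
 * index 200 of the same segment, the loop above returns 200 - 10 = 190 free
 * TRBs. If dequeue instead sits at index 5 of the other segment, it returns
 * (255 - 10) + 5 = 250.
 */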

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 * Return number of new segments needed to ensure this.
 */

static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhci_ring *ring,
					       unsigned int num_trbs)
{
	struct xhci_segment *seg;
	int trbs_past_seg;
	int enq_used;
	int new_segs;

	enq_used = ring->enqueue - ring->enq_seg->trbs;

	/* how many trbs will be queued past the enqueue segment? */
	trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);

	if (trbs_past_seg <= 0)
		return 0;

	/* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
	if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue)
		return 0;

	new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1));
	seg = ring->enq_seg;

	while (new_segs > 0) {
		seg = seg->next;
		if (seg == ring->deq_seg) {
			xhci_dbg(xhci, "Ring expansion by %d segments needed\n",
				 new_segs);
			xhci_dbg(xhci, "Adding %d trbs moves enq %d trbs into deq seg\n",
				 num_trbs, trbs_past_seg % TRBS_PER_SEGMENT);
			return new_segs;
		}
		new_segs--;
	}

	return 0;
}
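
/*
 * Worked example (invented values): with TRBS_PER_SEGMENT == 256, enqueue at
 * index 250 of its segment and num_trbs == 10, trbs_past_seg is
 * 250 + 10 - 255 = 5, so new_segs = 1 + 5 / 255 = 1. Expansion is only
 * reported if walking that one segment forward from enq_seg lands on deq_seg;
 * otherwise the existing segments already have room and 0 is returned.
 */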

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");

	trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);

	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
	return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
					cmd_list);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

		if (i_cmd->status != COMP_COMMAND_ABORTED)
			continue;

		i_cmd->status = COMP_COMMAND_RING_STOPPED;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);

		trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

		/*
		 * The caller waiting for completion is notified when the
		 * command completion event is received for these no-op
		 * commands.
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
}

/* Must be called with xhci->lock held, releases and acquires lock back */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
	struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
	union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
	u64 crcr;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	reinit_completion(&xhci->cmd_ring_stop_completion);

	/*
	 * The control bits like command stop, abort are located in lower
	 * dword of the command ring control register.
	 * Some controllers require all 64 bits to be written to abort the ring.
	 * Make sure the upper dword is valid, pointing to the next command,
	 * avoiding corrupting the command ring pointer in case the command ring
	 * is stopped by the time the upper dword is written.
	 */
	next_trb(xhci, NULL, &new_seg, &new_deq);
	if (trb_is_link(new_deq))
		next_trb(xhci, NULL, &new_seg, &new_deq);

	crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
	xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
	 * completion of the Command Abort operation. If CRR is not negated in 5
	 * seconds then driver handles it as if host died (-ENODEV).
	 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
	 * and try to recover a -ETIMEDOUT with a host controller reset.
	 */
	ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring,
					 CMD_RING_RUNNING, 0, 5 * 1000 * 1000,
					 XHCI_STATE_REMOVING);
	if (ret < 0) {
		xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
		xhci_halt(xhci);
		xhci_hc_died(xhci);
		return ret;
	}
	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Wait 2 secs (arbitrary
	 * number) to handle those cases after negation of CMD_RING_RUNNING.
	 */
	spin_unlock_irqrestore(&xhci->lock, flags);
	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
					  msecs_to_jiffies(2000));
	spin_lock_irqsave(&xhci->lock, flags);
	if (!ret) {
		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
	} else {
		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
	}
	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
			   unsigned int slot_id,
			   unsigned int ep_index,
			   unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
		return;

	trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));

	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* flush the write */
	readl(db_addr);
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
					   unsigned int slot_id,
					   unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
	     stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					      stream_id);
	}
}

void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
					 unsigned int slot_id,
					 unsigned int ep_index)
{
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
					     unsigned int slot_id,
					     unsigned int ep_index)
{
	if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
		xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
		return NULL;
	}
	if (ep_index >= EP_CTX_PER_DEV) {
		xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
		return NULL;
	}
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
		return NULL;
	}

	return &xhci->devs[slot_id]->eps[ep_index];
}

static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
					      struct xhci_virt_ep *ep,
					      unsigned int stream_id)
{
	/* common case, no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (!ep->stream_info)
		return NULL;

	if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
		xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
			  stream_id, ep->vdev->slot_id, ep->ep_index);
		return NULL;
	}

	return ep->stream_info->stream_rings[stream_id];
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
					      unsigned int slot_id, unsigned int ep_index,
					      unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return NULL;

	return xhci_virt_ep_to_ring(xhci, ep, stream_id);
}


/*
 * Get the hw dequeue pointer xHC stopped on, either directly from the
 * endpoint context, or if streams are in use from the stream context.
 * The returned hw_dequeue contains the lowest four bits with cycle state
 * and possible stream context type.
 */
static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
			   unsigned int ep_index, unsigned int stream_id)
{
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_stream_ctx *st_ctx;
	struct xhci_virt_ep *ep;

	ep = &vdev->eps[ep_index];

	if (ep->ep_state & EP_HAS_STREAMS) {
		st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
		return le64_to_cpu(st_ctx->stream_ring);
	}
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	return le64_to_cpu(ep_ctx->deq);
}
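
/*
 * Note on the value returned above (as used by the callers later in this
 * file): bit 0 of hw_dequeue carries the consumer cycle state and, for
 * stream endpoints, bits 1-3 carry the stream context type, so callers
 * typically split it as:
 *
 *	new_cycle = hw_dequeue & 0x1;
 *	hw_deq = hw_dequeue & ~0xf;	(16-byte aligned TRB DMA address)
 */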

static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *td)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_command *cmd;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	int new_cycle;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;
	u32 trb_sct = 0;
	int ret;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
			  stream_id);
		return -ENODEV;
	}
	/*
	 * A cancelled TD can complete with a stall if HW cached the trb.
	 * In this case driver can't find td, but if the ring is empty we
	 * can move the dequeue pointer to the current enqueue position.
	 * We shouldn't hit this anymore as cached cancelled TRBs are given back
	 * after clearing the cache, but be on the safe side and keep it anyway
	 */
	if (!td) {
		if (list_empty(&ep_ring->td_list)) {
			new_seg = ep_ring->enq_seg;
			new_deq = ep_ring->enqueue;
			new_cycle = ep_ring->cycle_state;
			xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
			goto deq_found;
		} else {
			xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
			return -EINVAL;
		}
	}

	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	new_cycle = hw_dequeue & 0x1;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			new_cycle ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			return -EINVAL;
		}

	} while (!cycle_found || !td_last_trb_found);

deq_found:

	/* Don't update the ring cycle state for the producer (us). */
	addr = xhci_trb_virt_to_dma(new_seg, new_deq);
	if (addr == 0) {
		xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
		xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
		return -EINVAL;
	}

	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
			  &addr);
		return -EBUSY;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!cmd) {
		xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
		return -ENOMEM;
	}

	if (stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | new_cycle,
		upper_32_bits(addr),
		STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
		EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return ret;
	}
	ep->queued_deq_seg = new_seg;
	ep->queued_deq_ptr = new_deq;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		       "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes. The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
	xhci_ring_cmd_db(xhci);
	return 0;
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		       struct xhci_td *td, bool flip_cycle)
{
	struct xhci_segment *seg = td->start_seg;
	union xhci_trb *trb = td->first_trb;

	while (1) {
		trb_to_noop(trb, TRB_TR_NOOP);

		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		next_trb(xhci, ep_ring, &seg, &trb);
	}
}

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
				     struct xhci_td *cur_td, int status)
{
	struct urb *urb = cur_td->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
			if (xhci->quirks & XHCI_AMD_PLL_FIX)
				usb_amd_quirk_pll_enable();
		}
	}
	xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	trace_xhci_urb_giveback(urb);
	usb_hcd_giveback_urb(hcd, urb, status);
}

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
					struct xhci_ring *ring, struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;
	size_t len;

	if (!ring || !seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	/* for IN transfers we need to copy the data from bounce to sg */
	if (urb->num_sgs) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
					   seg->bounce_len, seg->bounce_offs);
		if (len != seg->bounce_len)
			xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
				  len, seg->bounce_len);
	} else {
		memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
		       seg->bounce_len);
	}
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
			   struct xhci_ring *ep_ring, int status)
{
	struct urb *urb = NULL;

	/* Clean up the endpoint's TD list */
	urb = td->urb;

	/* if a bounce buffer was used to align this td then unmap it */
	xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned). Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
			  urb->transfer_buffer_length, urb->actual_length);
		urb->actual_length = 0;
		status = 0;
	}
	/* TD might be removed from td_list if we are giving back a cancelled URB */
	if (!list_empty(&td->td_list))
		list_del_init(&td->td_list);
	/* Giving back a cancelled URB, or if a slated TD completed anyway */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	inc_td_cnt(urb);
	/* Giveback the urb when all the tds are completed */
	if (last_td_in_urb(td)) {
		if ((urb->actual_length != urb->transfer_buffer_length &&
		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
		    (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
				 urb, urb->actual_length,
				 urb->transfer_buffer_length, status);

		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			status = 0;
		xhci_giveback_urb_in_irq(xhci, td, status);
	}

	return 0;
}


/* Complete the cancelled URBs we unlinked from td_list. */
static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
{
	struct xhci_ring *ring;
	struct xhci_td *td, *tmp_td;

	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
				 cancelled_td_list) {

		ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);

		if (td->cancel_status == TD_CLEARED) {
			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
				 __func__, td->urb);
			xhci_td_cleanup(ep->xhci, td, ring, td->status);
		} else {
			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
				 __func__, td->urb, td->cancel_status);
		}
		if (ep->xhci->xhc_state & XHCI_STATE_DYING)
			return;
	}
}

static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
				unsigned int ep_index, enum xhci_ep_reset_type reset_type)
{
	struct xhci_command *command;
	int ret = 0;

	command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!command) {
		ret = -ENOMEM;
		goto done;
	}

	xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
		 (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
		 ep_index, slot_id);

	ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
done:
	if (ret)
		xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
			 slot_id, ep_index, ret);
	return ret;
}

static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
				       struct xhci_virt_ep *ep,
				       struct xhci_td *td,
				       enum xhci_ep_reset_type reset_type)
{
	unsigned int slot_id = ep->vdev->slot_id;
	int err;

	/*
	 * Avoid resetting endpoint if link is inactive. Can cause host hang.
	 * Device will be reset soon to recover the link so don't do anything
	 */
	if (ep->vdev->flags & VDEV_PORT_ERROR)
		return -ENODEV;

	/* add td to cancelled list and let reset ep handler take care of it */
	if (reset_type == EP_HARD_RESET) {
		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
		if (td && list_empty(&td->cancelled_td_list)) {
			list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
			td->cancel_status = TD_HALTED;
		}
	}

	if (ep->ep_state & EP_HALTED) {
		xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
			 ep->ep_index);
		return 0;
	}

	err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
	if (err)
		return err;

	ep->ep_state |= EP_HALTED;

	xhci_ring_cmd_db(xhci);

	return 0;
}

/*
 * Fix up the ep ring first, so HW stops executing cancelled TDs.
 * We have the xHCI lock, so nothing can modify this list until we drop it.
 * We're also in the event handler, so we can't get re-interrupted if another
 * Stop Endpoint command completes.
 *
 * only call this when ring is not in a running state
 */

static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_td *td = NULL;
	struct xhci_td *tmp_td = NULL;
	struct xhci_td *cached_td = NULL;
	struct xhci_ring *ring;
	u64 hw_deq;
	unsigned int slot_id = ep->vdev->slot_id;
	int err;

	xhci = ep->xhci;

	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
			       (unsigned long long)xhci_trb_virt_to_dma(
				       td->start_seg, td->first_trb),
			       td->urb->stream_id, td->urb);
		list_del_init(&td->td_list);
		ring = xhci_urb_to_transfer_ring(xhci, td->urb);
		if (!ring) {
			xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
				  td->urb, td->urb->stream_id);
			continue;
		}
		/*
		 * If a ring stopped on the TD we need to cancel then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 * Rings halted due to STALL may show hw_deq is past the stalled
		 * TD, but still require a set TR Deq command to flush xHC cache.
		 */
		hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
					 td->urb->stream_id);
		hw_deq &= ~0xf;

		if (td->cancel_status == TD_HALTED ||
		    trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
			switch (td->cancel_status) {
			case TD_CLEARED: /* TD is already no-op */
			case TD_CLEARING_CACHE: /* set TR deq command already queued */
				break;
			case TD_DIRTY: /* TD is cached, clear it */
			case TD_HALTED:
				td->cancel_status = TD_CLEARING_CACHE;
				if (cached_td)
					/* FIXME stream case, several stopped rings */
					xhci_dbg(xhci,
						 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
						 td->urb->stream_id, td->urb,
						 cached_td->urb->stream_id, cached_td->urb);
				cached_td = td;
				break;
			}
		} else {
			td_to_noop(xhci, ring, td, false);
			td->cancel_status = TD_CLEARED;
		}
	}

	/* If there's no need to move the dequeue pointer then we're done */
	if (!cached_td)
		return 0;

	err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
					cached_td->urb->stream_id,
					cached_td);
	if (err) {
		/* Failed to move past cached td, just set cached TDs to no-op */
		list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
			if (td->cancel_status != TD_CLEARING_CACHE)
				continue;
			xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
				 td->urb);
			td_to_noop(xhci, ring, td, false);
			td->cancel_status = TD_CLEARED;
		}
	}
	return 0;
}

/*
 * Returns the TD the endpoint ring halted on.
 * Only call for non-running rings without streams.
 */
static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
{
	struct xhci_td *td;
	u64 hw_deq;

	if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
		hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
		hw_deq &= ~0xf;
		td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
		if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
			      td->last_trb, hw_deq, false))
			return td;
	}
	return NULL;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring. There are two ways to do that:
 *
 * 1. If the HW was in the middle of processing the TD that needs to be
 *    cancelled, then we must move the ring's dequeue pointer past the last TRB
 *    in the TD with a Set Dequeue Pointer Command.
 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *    bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
				    union xhci_trb *trb, u32 comp_code)
{
	unsigned int ep_index;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_td *td = NULL;
	enum xhci_ep_reset_type reset_type;
	struct xhci_command *command;
	int err;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
				  slot_id);
		return;
	}

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);

	trace_xhci_handle_cmd_stop_ep(ep_ctx);

	if (comp_code == COMP_CONTEXT_STATE_ERROR) {
		/*
		 * If the stop endpoint command raced with a halting endpoint we need
		 * to reset the host side endpoint first.
		 * If the TD we halted on isn't cancelled the TD should be given back
		 * with a proper error code, and the ring dequeue moved past the TD.
		 * In the streams case we can't find hw_deq, or the TD we halted on,
		 * so do a soft reset.
		 *
		 * The proper error code is unknown here; it would be -EPIPE if the
		 * device side of the endpoint halted (aka STALL), and -EPROTO if not
		 * (transaction error). We use -EPROTO: if the device is stalled it
		 * should return a stall error on the next transfer, which then will
		 * return -EPIPE, and the device side stall is noted and cleared by
		 * the class driver.
		 */
		switch (GET_EP_CTX_STATE(ep_ctx)) {
		case EP_STATE_HALTED:
			xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
			if (ep->ep_state & EP_HAS_STREAMS) {
				reset_type = EP_SOFT_RESET;
			} else {
				reset_type = EP_HARD_RESET;
				td = find_halted_td(ep);
				if (td)
					td->status = -EPROTO;
			}
			/* reset ep, reset handler cleans up cancelled tds */
			err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
			if (err)
				break;
			ep->ep_state &= ~EP_STOP_CMD_PENDING;
			return;
		case EP_STATE_RUNNING:
			/* Race, HW handled stop ep cmd before ep was running */
			xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");

			command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
			if (!command) {
				ep->ep_state &= ~EP_STOP_CMD_PENDING;
				return;
			}
			xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
			xhci_ring_cmd_db(xhci);

			return;
		default:
			break;
		}
	}

	/* will queue a set TR deq if stopped on a cancelled, uncleared TD */
	xhci_invalidate_cancelled_tds(ep);
	ep->ep_state &= ~EP_STOP_CMD_PENDING;

	/* Otherwise ring the doorbell(s) to restart queued transfers */
	xhci_giveback_invalidated_tds(ep);
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;

	list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
		list_del_init(&cur_td->td_list);

		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
				    int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	if ((ep->ep_state & EP_HAS_STREAMS) ||
	    (ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 1; stream_id < ep->stream_info->num_streams;
		     stream_id++) {
			ring = ep->stream_info->stream_rings[stream_id];
			if (!ring)
				continue;

			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				       "Killing URBs for slot ID %u, ep index %u, stream %u",
				       slot_id, ep_index, stream_id);
			xhci_kill_ring_urbs(xhci, ring);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Killing URBs for slot ID %u, ep index %u",
			       slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}

	list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
				 cancelled_td_list) {
		list_del_init(&cur_td->cancelled_td_list);
		inc_td_cnt(cur_td->urb);

		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/*
 * The host controller died; register reads return 0xffffffff.
 * Complete pending commands, mark them ABORTED.
 * URBs need to be given back as usb core might be waiting with device locks
 * held for the URBs to finish during device disconnect, blocking host remove.
 *
 * Call with xhci->lock held.
 * lock is released and re-acquired while giving back urb.
 */
1252 | void xhci_hc_died(struct xhci_hcd *xhci) |
1253 | { |
1254 | int i, j; |
1255 | |
1256 | if (xhci->xhc_state & XHCI_STATE_DYING) |
1257 | return; |
1258 | |
1259 | xhci_err(xhci, "xHCI host controller not responding, assume dead\n" ); |
1260 | xhci->xhc_state |= XHCI_STATE_DYING; |
1261 | |
1262 | xhci_cleanup_command_queue(xhci); |
1263 | |
1264 | /* return any pending urbs, remove may be waiting for them */ |
1265 | for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) { |
1266 | if (!xhci->devs[i]) |
1267 | continue; |
1268 | for (j = 0; j < 31; j++) |
1269 | xhci_kill_endpoint_urbs(xhci, slot_id: i, ep_index: j); |
1270 | } |
1271 | |
1272 | /* inform usb core hc died if PCI remove isn't already handling it */ |
1273 | if (!(xhci->xhc_state & XHCI_STATE_REMOVING)) |
1274 | usb_hc_died(hcd: xhci_to_hcd(xhci)); |
1275 | } |
1276 | |
1277 | static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci, |
1278 | struct xhci_virt_device *dev, |
1279 | struct xhci_ring *ep_ring, |
1280 | unsigned int ep_index) |
1281 | { |
1282 | union xhci_trb *dequeue_temp; |
1283 | |
1284 | dequeue_temp = ep_ring->dequeue; |
1285 | |
1286 | /* If we get two back-to-back stalls, and the first stalled transfer |
1287 | * ends just before a link TRB, the dequeue pointer will be left on |
1288 | * the link TRB by the code in the while loop. So we have to update |
1289 | * the dequeue pointer one segment further, or we'll jump off |
1290 | * the segment into la-la-land. |
1291 | */ |
1292 | if (trb_is_link(trb: ep_ring->dequeue)) { |
1293 | ep_ring->deq_seg = ep_ring->deq_seg->next; |
1294 | ep_ring->dequeue = ep_ring->deq_seg->trbs; |
1295 | } |
1296 | |
1297 | while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) { |
1298 | /* We have more usable TRBs */ |
1299 | ep_ring->dequeue++; |
1300 | if (trb_is_link(trb: ep_ring->dequeue)) { |
1301 | if (ep_ring->dequeue == |
1302 | dev->eps[ep_index].queued_deq_ptr) |
1303 | break; |
1304 | ep_ring->deq_seg = ep_ring->deq_seg->next; |
1305 | ep_ring->dequeue = ep_ring->deq_seg->trbs; |
1306 | } |
1307 | if (ep_ring->dequeue == dequeue_temp) { |
1308 | xhci_dbg(xhci, "Unable to find new dequeue pointer\n" ); |
1309 | break; |
1310 | } |
1311 | } |
1312 | } |
1313 | |
1314 | /* |
1315 | * When we get a completion for a Set Transfer Ring Dequeue Pointer command, |
1316 | * we need to clear the set deq pending flag in the endpoint ring state, so that |
1317 | * the TD queueing code can ring the doorbell again. We also need to ring the |
1318 | * endpoint doorbell to restart the ring, but only if there aren't more |
1319 | * cancellations pending. |
1320 | */ |
1321 | static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, |
1322 | union xhci_trb *trb, u32 cmd_comp_code) |
1323 | { |
1324 | unsigned int ep_index; |
1325 | unsigned int stream_id; |
1326 | struct xhci_ring *ep_ring; |
1327 | struct xhci_virt_ep *ep; |
1328 | struct xhci_ep_ctx *ep_ctx; |
1329 | struct xhci_slot_ctx *slot_ctx; |
1330 | struct xhci_td *td, *tmp_td; |
1331 | |
1332 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
1333 | stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); |
1334 | ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
1335 | if (!ep) |
1336 | return; |
1337 | |
1338 | ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id); |
1339 | if (!ep_ring) { |
1340 | xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n" , |
1341 | stream_id); |
1342 | /* XXX: Harmless??? */ |
1343 | goto cleanup; |
1344 | } |
1345 | |
1346 | ep_ctx = xhci_get_ep_ctx(xhci, ctx: ep->vdev->out_ctx, ep_index); |
1347 | slot_ctx = xhci_get_slot_ctx(xhci, ctx: ep->vdev->out_ctx); |
1348 | trace_xhci_handle_cmd_set_deq(ctx: slot_ctx); |
1349 | trace_xhci_handle_cmd_set_deq_ep(ctx: ep_ctx); |
1350 | |
1351 | if (cmd_comp_code != COMP_SUCCESS) { |
1352 | unsigned int ep_state; |
1353 | unsigned int slot_state; |
1354 | |
1355 | switch (cmd_comp_code) { |
1356 | case COMP_TRB_ERROR: |
1357 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n" ); |
1358 | break; |
1359 | case COMP_CONTEXT_STATE_ERROR: |
1360 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n" ); |
1361 | ep_state = GET_EP_CTX_STATE(ep_ctx); |
1362 | slot_state = le32_to_cpu(slot_ctx->dev_state); |
1363 | slot_state = GET_SLOT_STATE(slot_state); |
1364 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_cancel_urb, |
1365 | fmt: "Slot state = %u, EP state = %u" , |
1366 | slot_state, ep_state); |
1367 | break; |
1368 | case COMP_SLOT_NOT_ENABLED_ERROR: |
1369 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n" , |
1370 | slot_id); |
1371 | break; |
1372 | default: |
1373 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n" , |
1374 | cmd_comp_code); |
1375 | break; |
1376 | } |
1377 | /* OK what do we do now? The endpoint state is hosed, and we |
1378 | * should never get to this point if the synchronization between |
1379 | * queueing, and endpoint state are correct. This might happen |
1380 | * if the device gets disconnected after we've finished |
1381 | * cancelling URBs, which might not be an error... |
1382 | */ |
1383 | } else { |
1384 | u64 deq; |
1385 | /* 4.6.10 deq ptr is written to the stream ctx for streams */ |
1386 | if (ep->ep_state & EP_HAS_STREAMS) { |
1387 | struct xhci_stream_ctx *ctx = |
1388 | &ep->stream_info->stream_ctx_array[stream_id]; |
1389 | deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK; |
1390 | } else { |
1391 | deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; |
1392 | } |
1393 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_cancel_urb, |
1394 | fmt: "Successful Set TR Deq Ptr cmd, deq = @%08llx" , deq); |
1395 | if (xhci_trb_virt_to_dma(seg: ep->queued_deq_seg, |
1396 | trb: ep->queued_deq_ptr) == deq) { |
1397 | /* Update the ring's dequeue segment and dequeue pointer |
1398 | * to reflect the new position. |
1399 | */ |
1400 | update_ring_for_set_deq_completion(xhci, dev: ep->vdev, |
1401 | ep_ring, ep_index); |
1402 | } else { |
1403 | xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n" ); |
1404 | xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n" , |
1405 | ep->queued_deq_seg, ep->queued_deq_ptr); |
1406 | } |
1407 | } |
1408 | /* HW cached TDs cleared from cache, give them back */ |
1409 | list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, |
1410 | cancelled_td_list) { |
1411 | ep_ring = xhci_urb_to_transfer_ring(xhci: ep->xhci, urb: td->urb); |
1412 | if (td->cancel_status == TD_CLEARING_CACHE) { |
1413 | td->cancel_status = TD_CLEARED; |
1414 | xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n" , |
1415 | __func__, td->urb); |
1416 | xhci_td_cleanup(xhci: ep->xhci, td, ep_ring, status: td->status); |
1417 | } else { |
1418 | xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n" , |
1419 | __func__, td->urb, td->cancel_status); |
1420 | } |
1421 | } |
1422 | cleanup: |
1423 | ep->ep_state &= ~SET_DEQ_PENDING; |
1424 | ep->queued_deq_seg = NULL; |
1425 | ep->queued_deq_ptr = NULL; |
1426 | /* Restart any rings with pending URBs */ |
1427 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
1428 | } |
1429 | |
1430 | static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, |
1431 | union xhci_trb *trb, u32 cmd_comp_code) |
1432 | { |
1433 | struct xhci_virt_ep *ep; |
1434 | struct xhci_ep_ctx *ep_ctx; |
1435 | unsigned int ep_index; |
1436 | |
1437 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
1438 | ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
1439 | if (!ep) |
1440 | return; |
1441 | |
1442 | ep_ctx = xhci_get_ep_ctx(xhci, ctx: ep->vdev->out_ctx, ep_index); |
1443 | trace_xhci_handle_cmd_reset_ep(ctx: ep_ctx); |
1444 | |
1445 | /* This command will only fail if the endpoint wasn't halted, |
1446 | * but we don't care. |
1447 | */ |
1448 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_reset_ep, |
1449 | fmt: "Ignoring reset ep completion code of %u" , cmd_comp_code); |
1450 | |
1451 | /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */ |
1452 | xhci_invalidate_cancelled_tds(ep); |
1453 | |
1454 | /* Clear our internal halted state */ |
1455 | ep->ep_state &= ~EP_HALTED; |
1456 | |
1457 | xhci_giveback_invalidated_tds(ep); |
1458 | |
1459 | /* if this was a soft reset, then restart */ |
1460 | if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP) |
1461 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
1462 | } |
1463 | |
1464 | static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, |
1465 | struct xhci_command *command, u32 cmd_comp_code) |
1466 | { |
1467 | if (cmd_comp_code == COMP_SUCCESS) |
1468 | command->slot_id = slot_id; |
1469 | else |
1470 | command->slot_id = 0; |
1471 | } |
1472 | |
1473 | static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) |
1474 | { |
1475 | struct xhci_virt_device *virt_dev; |
1476 | struct xhci_slot_ctx *slot_ctx; |
1477 | |
1478 | virt_dev = xhci->devs[slot_id]; |
1479 | if (!virt_dev) |
1480 | return; |
1481 | |
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_handle_cmd_disable_slot(slot_ctx);

	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1488 | } |
1489 | |
1490 | static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, |
1491 | u32 cmd_comp_code) |
1492 | { |
1493 | struct xhci_virt_device *virt_dev; |
1494 | struct xhci_input_control_ctx *ctrl_ctx; |
1495 | struct xhci_ep_ctx *ep_ctx; |
1496 | unsigned int ep_index; |
1497 | u32 add_flags; |
1498 | |
1499 | /* |
1500 | * Configure endpoint commands can come from the USB core configuration |
1501 | * or alt setting changes, or when streams were being configured. |
1502 | */ |
1503 | |
1504 | virt_dev = xhci->devs[slot_id]; |
1505 | if (!virt_dev) |
1506 | return; |
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
1510 | return; |
1511 | } |
1512 | |
1513 | add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
1514 | |
1515 | /* Input ctx add_flags are the endpoint index plus one */ |
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
	trace_xhci_handle_cmd_config_ep(ep_ctx);
1520 | |
1521 | return; |
1522 | } |
1523 | |
1524 | static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id) |
1525 | { |
1526 | struct xhci_virt_device *vdev; |
1527 | struct xhci_slot_ctx *slot_ctx; |
1528 | |
1529 | vdev = xhci->devs[slot_id]; |
1530 | if (!vdev) |
1531 | return; |
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_addr_dev(slot_ctx);
1534 | } |
1535 | |
1536 | static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id) |
1537 | { |
1538 | struct xhci_virt_device *vdev; |
1539 | struct xhci_slot_ctx *slot_ctx; |
1540 | |
1541 | vdev = xhci->devs[slot_id]; |
1542 | if (!vdev) { |
1543 | xhci_warn(xhci, "Reset device command completion for disabled slot %u\n" , |
1544 | slot_id); |
1545 | return; |
1546 | } |
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_reset_dev(slot_ctx);

	xhci_dbg(xhci, "Completed reset device command.\n");
1551 | } |
1552 | |
1553 | static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, |
1554 | struct xhci_event_cmd *event) |
1555 | { |
1556 | if (!(xhci->quirks & XHCI_NEC_HOST)) { |
1557 | xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n" ); |
1558 | return; |
1559 | } |
1560 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_quirks, |
1561 | fmt: "NEC firmware version %2x.%02x" , |
1562 | NEC_FW_MAJOR(le32_to_cpu(event->status)), |
1563 | NEC_FW_MINOR(le32_to_cpu(event->status))); |
1564 | } |
1565 | |
1566 | static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status) |
1567 | { |
	list_del(&cmd->cmd_list);
1569 | |
1570 | if (cmd->completion) { |
1571 | cmd->status = status; |
1572 | complete(cmd->completion); |
1573 | } else { |
		kfree(cmd);
1575 | } |
1576 | } |
1577 | |
1578 | void xhci_cleanup_command_queue(struct xhci_hcd *xhci) |
1579 | { |
1580 | struct xhci_command *cur_cmd, *tmp_cmd; |
1581 | xhci->current_cmd = NULL; |
1582 | list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) |
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
1584 | } |
1585 | |
1586 | void xhci_handle_command_timeout(struct work_struct *work) |
1587 | { |
1588 | struct xhci_hcd *xhci; |
1589 | unsigned long flags; |
1590 | char str[XHCI_MSG_MAX]; |
1591 | u64 hw_ring_state; |
1592 | u32 cmd_field3; |
1593 | u32 usbsts; |
1594 | |
1595 | xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer); |
1596 | |
1597 | spin_lock_irqsave(&xhci->lock, flags); |
1598 | |
1599 | /* |
1600 | * If timeout work is pending, or current_cmd is NULL, it means we |
1601 | * raced with command completion. Command is handled so just return. |
1602 | */ |
1603 | if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) { |
		spin_unlock_irqrestore(&xhci->lock, flags);
1605 | return; |
1606 | } |
1607 | |
1608 | cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]); |
	usbsts = readl(&xhci->op_regs->status);
	xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
1611 | |
1612 | /* Bail out and tear down xhci if a stop endpoint command failed */ |
1613 | if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) { |
1614 | struct xhci_virt_ep *ep; |
1615 | |
1616 | xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n" ); |
1617 | |
1618 | ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3), |
1619 | TRB_TO_EP_INDEX(cmd_field3)); |
1620 | if (ep) |
1621 | ep->ep_state &= ~EP_STOP_CMD_PENDING; |
1622 | |
1623 | xhci_halt(xhci); |
1624 | xhci_hc_died(xhci); |
1625 | goto time_out_completed; |
1626 | } |
1627 | |
1628 | /* mark this command to be cancelled */ |
1629 | xhci->current_cmd->status = COMP_COMMAND_ABORTED; |
1630 | |
1631 | /* Make sure command ring is running before aborting it */ |
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1633 | if (hw_ring_state == ~(u64)0) { |
1634 | xhci_hc_died(xhci); |
1635 | goto time_out_completed; |
1636 | } |
1637 | |
1638 | if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && |
1639 | (hw_ring_state & CMD_RING_RUNNING)) { |
1640 | /* Prevent new doorbell, and start command abort */ |
1641 | xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; |
1642 | xhci_dbg(xhci, "Command timeout\n" ); |
1643 | xhci_abort_cmd_ring(xhci, flags); |
1644 | goto time_out_completed; |
1645 | } |
1646 | |
1647 | /* host removed. Bail out */ |
1648 | if (xhci->xhc_state & XHCI_STATE_REMOVING) { |
1649 | xhci_dbg(xhci, "host removed, ring start fail?\n" ); |
1650 | xhci_cleanup_command_queue(xhci); |
1651 | |
1652 | goto time_out_completed; |
1653 | } |
1654 | |
1655 | /* command timeout on stopped ring, ring can't be aborted */ |
1656 | xhci_dbg(xhci, "Command timeout on stopped ring\n" ); |
1657 | xhci_handle_stopped_cmd_ring(xhci, cur_cmd: xhci->current_cmd); |
1658 | |
1659 | time_out_completed: |
	spin_unlock_irqrestore(&xhci->lock, flags);
1661 | return; |
1662 | } |
1663 | |
1664 | static void handle_cmd_completion(struct xhci_hcd *xhci, |
1665 | struct xhci_event_cmd *event) |
1666 | { |
1667 | unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); |
1668 | u64 cmd_dma; |
1669 | dma_addr_t cmd_dequeue_dma; |
1670 | u32 cmd_comp_code; |
1671 | union xhci_trb *cmd_trb; |
1672 | struct xhci_command *cmd; |
1673 | u32 cmd_type; |
1674 | |
1675 | if (slot_id >= MAX_HC_SLOTS) { |
1676 | xhci_warn(xhci, "Invalid slot_id %u\n" , slot_id); |
1677 | return; |
1678 | } |
1679 | |
1680 | cmd_dma = le64_to_cpu(event->cmd_trb); |
1681 | cmd_trb = xhci->cmd_ring->dequeue; |
1682 | |
	trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);

	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
1687 | /* |
1688 | * Check whether the completion event is for our internal kept |
1689 | * command. |
1690 | */ |
1691 | if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) { |
1692 | xhci_warn(xhci, |
1693 | "ERROR mismatched command completion event\n" ); |
1694 | return; |
1695 | } |
1696 | |
1697 | cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list); |
1698 | |
	cancel_delayed_work(&xhci->cmd_timer);
1700 | |
1701 | cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); |
1702 | |
1703 | /* If CMD ring stopped we own the trbs between enqueue and dequeue */ |
1704 | if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { |
1705 | complete_all(&xhci->cmd_ring_stop_completion); |
1706 | return; |
1707 | } |
1708 | |
1709 | if (cmd->command_trb != xhci->cmd_ring->dequeue) { |
1710 | xhci_err(xhci, |
1711 | "Command completion event does not match command\n" ); |
1712 | return; |
1713 | } |
1714 | |
1715 | /* |
1716 | * Host aborted the command ring, check if the current command was |
1717 | * supposed to be aborted, otherwise continue normally. |
1718 | * The command ring is stopped now, but the xHC will issue a Command |
1719 | * Ring Stopped event which will cause us to restart it. |
1720 | */ |
1721 | if (cmd_comp_code == COMP_COMMAND_ABORTED) { |
1722 | xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; |
1723 | if (cmd->status == COMP_COMMAND_ABORTED) { |
1724 | if (xhci->current_cmd == cmd) |
1725 | xhci->current_cmd = NULL; |
1726 | goto event_handled; |
1727 | } |
1728 | } |
1729 | |
1730 | cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); |
1731 | switch (cmd_type) { |
1732 | case TRB_ENABLE_SLOT: |
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
1734 | break; |
1735 | case TRB_DISABLE_SLOT: |
1736 | xhci_handle_cmd_disable_slot(xhci, slot_id); |
1737 | break; |
1738 | case TRB_CONFIG_EP: |
1739 | if (!cmd->completion) |
1740 | xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code); |
1741 | break; |
1742 | case TRB_EVAL_CONTEXT: |
1743 | break; |
1744 | case TRB_ADDR_DEV: |
1745 | xhci_handle_cmd_addr_dev(xhci, slot_id); |
1746 | break; |
1747 | case TRB_STOP_RING: |
1748 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
1749 | le32_to_cpu(cmd_trb->generic.field[3]))); |
1750 | if (!cmd->completion) |
			xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
						cmd_comp_code);
1753 | break; |
1754 | case TRB_SET_DEQ: |
1755 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
1756 | le32_to_cpu(cmd_trb->generic.field[3]))); |
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1758 | break; |
1759 | case TRB_CMD_NOOP: |
1760 | /* Is this an aborted command turned to NO-OP? */ |
1761 | if (cmd->status == COMP_COMMAND_RING_STOPPED) |
1762 | cmd_comp_code = COMP_COMMAND_RING_STOPPED; |
1763 | break; |
1764 | case TRB_RESET_EP: |
1765 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
1766 | le32_to_cpu(cmd_trb->generic.field[3]))); |
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1768 | break; |
1769 | case TRB_RESET_DEV: |
1770 | /* SLOT_ID field in reset device cmd completion event TRB is 0. |
1771 | * Use the SLOT_ID from the command TRB instead (xhci 4.6.11) |
1772 | */ |
1773 | slot_id = TRB_TO_SLOT_ID( |
1774 | le32_to_cpu(cmd_trb->generic.field[3])); |
1775 | xhci_handle_cmd_reset_dev(xhci, slot_id); |
1776 | break; |
1777 | case TRB_NEC_GET_FW: |
1778 | xhci_handle_cmd_nec_get_fw(xhci, event); |
1779 | break; |
1780 | default: |
1781 | /* Skip over unknown commands on the event ring */ |
1782 | xhci_info(xhci, "INFO unknown command type %d\n" , cmd_type); |
1783 | break; |
1784 | } |
1785 | |
1786 | /* restart timer if this wasn't the last command */ |
	if (!list_is_singular(&xhci->cmd_list)) {
1788 | xhci->current_cmd = list_first_entry(&cmd->cmd_list, |
1789 | struct xhci_command, cmd_list); |
1790 | xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); |
1791 | } else if (xhci->current_cmd == cmd) { |
1792 | xhci->current_cmd = NULL; |
1793 | } |
1794 | |
1795 | event_handled: |
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
1799 | } |
1800 | |
1801 | static void handle_vendor_event(struct xhci_hcd *xhci, |
1802 | union xhci_trb *event, u32 trb_type) |
1803 | { |
1804 | xhci_dbg(xhci, "Vendor specific event TRB type = %u\n" , trb_type); |
1805 | if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) |
		handle_cmd_completion(xhci, &event->event_cmd);
1807 | } |
1808 | |
1809 | static void handle_device_notification(struct xhci_hcd *xhci, |
1810 | union xhci_trb *event) |
1811 | { |
1812 | u32 slot_id; |
1813 | struct usb_device *udev; |
1814 | |
1815 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3])); |
1816 | if (!xhci->devs[slot_id]) { |
1817 | xhci_warn(xhci, "Device Notification event for " |
1818 | "unused slot %u\n" , slot_id); |
1819 | return; |
1820 | } |
1821 | |
1822 | xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n" , |
1823 | slot_id); |
1824 | udev = xhci->devs[slot_id]->udev; |
1825 | if (udev && udev->parent) |
		usb_wakeup_notification(udev->parent, udev->portnum);
1827 | } |
1828 | |
1829 | /* |
 * Quirk handler for errata seen on the Cavium ThunderX2 processor xHCI
 * controller.
 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
 * if a connection to a USB 1 device is followed by another connection
 * to a USB 2 device.
 *
 * Reset the PHY after the USB device is disconnected if the device speed
 * is less than HCD_USB3.
 * Retry the reset sequence a maximum of 4 times, checking the PLL lock status.
1839 | * |
1840 | */ |
1841 | static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci) |
1842 | { |
1843 | struct usb_hcd *hcd = xhci_to_hcd(xhci); |
1844 | u32 pll_lock_check; |
1845 | u32 retry_count = 4; |
1846 | |
1847 | do { |
1848 | /* Assert PHY reset */ |
		writel(0x6F, hcd->regs + 0x1048);
		udelay(10);
		/* De-assert the PHY reset */
		writel(0x7F, hcd->regs + 0x1048);
		udelay(200);
		pll_lock_check = readl(hcd->regs + 0x1070);
1855 | } while (!(pll_lock_check & 0x1) && --retry_count); |
1856 | } |
1857 | |
1858 | static void handle_port_status(struct xhci_hcd *xhci, |
1859 | struct xhci_interrupter *ir, |
1860 | union xhci_trb *event) |
1861 | { |
1862 | struct usb_hcd *hcd; |
1863 | u32 port_id; |
1864 | u32 portsc, cmd_reg; |
1865 | int max_ports; |
1866 | int slot_id; |
1867 | unsigned int hcd_portnum; |
1868 | struct xhci_bus_state *bus_state; |
1869 | bool bogus_port_status = false; |
1870 | struct xhci_port *port; |
1871 | |
1872 | /* Port status change events always have a successful completion code */ |
1873 | if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) |
1874 | xhci_warn(xhci, |
1875 | "WARN: xHC returned failed port status event\n" ); |
1876 | |
1877 | port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); |
1878 | max_ports = HCS_MAX_PORTS(xhci->hcs_params1); |
1879 | |
1880 | if ((port_id <= 0) || (port_id > max_ports)) { |
1881 | xhci_warn(xhci, "Port change event with invalid port ID %d\n" , |
1882 | port_id); |
1883 | return; |
1884 | } |
1885 | |
1886 | port = &xhci->hw_ports[port_id - 1]; |
1887 | if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) { |
1888 | xhci_warn(xhci, "Port change event, no port for port ID %u\n" , |
1889 | port_id); |
1890 | bogus_port_status = true; |
1891 | goto cleanup; |
1892 | } |
1893 | |
1894 | /* We might get interrupts after shared_hcd is removed */ |
1895 | if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) { |
1896 | xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n" ); |
1897 | bogus_port_status = true; |
1898 | goto cleanup; |
1899 | } |
1900 | |
1901 | hcd = port->rhub->hcd; |
1902 | bus_state = &port->rhub->bus_state; |
1903 | hcd_portnum = port->hcd_portnum; |
	portsc = readl(port->addr);
1905 | |
1906 | xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n" , |
1907 | hcd->self.busnum, hcd_portnum + 1, port_id, portsc); |
1908 | |
1909 | trace_xhci_handle_port_status(port, portsc); |
1910 | |
1911 | if (hcd->state == HC_STATE_SUSPENDED) { |
1912 | xhci_dbg(xhci, "resume root hub\n" ); |
1913 | usb_hcd_resume_root_hub(hcd); |
1914 | } |
1915 | |
1916 | if (hcd->speed >= HCD_USB3 && |
1917 | (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) { |
		slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
1919 | if (slot_id && xhci->devs[slot_id]) |
1920 | xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR; |
1921 | } |
1922 | |
1923 | if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) { |
1924 | xhci_dbg(xhci, "port resume event for port %d\n" , port_id); |
1925 | |
		cmd_reg = readl(&xhci->op_regs->command);
1927 | if (!(cmd_reg & CMD_RUN)) { |
1928 | xhci_warn(xhci, "xHC is not running.\n" ); |
1929 | goto cleanup; |
1930 | } |
1931 | |
1932 | if (DEV_SUPERSPEED_ANY(portsc)) { |
1933 | xhci_dbg(xhci, "remote wake SS port %d\n" , port_id); |
1934 | /* Set a flag to say the port signaled remote wakeup, |
1935 | * so we can tell the difference between the end of |
1936 | * device and host initiated resume. |
1937 | */ |
1938 | bus_state->port_remote_wakeup |= 1 << hcd_portnum; |
1939 | xhci_test_and_clear_bit(xhci, port, PORT_PLC); |
			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1941 | xhci_set_link_state(xhci, port, XDEV_U0); |
1942 | /* Need to wait until the next link state change |
1943 | * indicates the device is actually in U0. |
1944 | */ |
1945 | bogus_port_status = true; |
1946 | goto cleanup; |
1947 | } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) { |
1948 | xhci_dbg(xhci, "resume HS port %d\n" , port_id); |
1949 | port->resume_timestamp = jiffies + |
1950 | msecs_to_jiffies(USB_RESUME_TIMEOUT); |
			set_bit(hcd_portnum, &bus_state->resuming_ports);
			/* Do the rest in GetPortStatus after the resume time delay.
			 * Avoid polling the roothub status before that, so that a
			 * USB device sees an auto-resume latency of around ~40ms.
			 */
			set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
			mod_timer(&hcd->rh_timer,
				  port->resume_timestamp);
			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1960 | bogus_port_status = true; |
1961 | } |
1962 | } |
1963 | |
1964 | if ((portsc & PORT_PLC) && |
1965 | DEV_SUPERSPEED_ANY(portsc) && |
1966 | ((portsc & PORT_PLS_MASK) == XDEV_U0 || |
1967 | (portsc & PORT_PLS_MASK) == XDEV_U1 || |
1968 | (portsc & PORT_PLS_MASK) == XDEV_U2)) { |
1969 | xhci_dbg(xhci, "resume SS port %d finished\n" , port_id); |
1970 | complete(&port->u3exit_done); |
1971 | /* We've just brought the device into U0/1/2 through either the |
1972 | * Resume state after a device remote wakeup, or through the |
1973 | * U3Exit state after a host-initiated resume. If it's a device |
1974 | * initiated remote wake, don't pass up the link state change, |
1975 | * so the roothub behavior is consistent with external |
1976 | * USB 3.0 hub behavior. |
1977 | */ |
		slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
1979 | if (slot_id && xhci->devs[slot_id]) |
1980 | xhci_ring_device(xhci, slot_id); |
1981 | if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) { |
1982 | xhci_test_and_clear_bit(xhci, port, PORT_PLC); |
			usb_wakeup_notification(hcd->self.root_hub,
						hcd_portnum + 1);
1985 | bogus_port_status = true; |
1986 | goto cleanup; |
1987 | } |
1988 | } |
1989 | |
1990 | /* |
1991 | * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or |
1992 | * RExit to a disconnect state). If so, let the driver know it's |
1993 | * out of the RExit state. |
1994 | */ |
1995 | if (hcd->speed < HCD_USB3 && port->rexit_active) { |
1996 | complete(&port->rexit_done); |
1997 | port->rexit_active = false; |
1998 | bogus_port_status = true; |
1999 | goto cleanup; |
2000 | } |
2001 | |
2002 | if (hcd->speed < HCD_USB3) { |
2003 | xhci_test_and_clear_bit(xhci, port, PORT_PLC); |
2004 | if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) && |
2005 | (portsc & PORT_CSC) && !(portsc & PORT_CONNECT)) |
2006 | xhci_cavium_reset_phy_quirk(xhci); |
2007 | } |
2008 | |
2009 | cleanup: |
2010 | |
2011 | /* Don't make the USB core poll the roothub if we got a bad port status |
2012 | * change event. Besides, at that point we can't tell which roothub |
2013 | * (USB 2.0 or USB 3.0) to kick. |
2014 | */ |
2015 | if (bogus_port_status) |
2016 | return; |
2017 | |
2018 | /* |
2019 | * xHCI port-status-change events occur when the "or" of all the |
2020 | * status-change bits in the portsc register changes from 0 to 1. |
2021 | * New status changes won't cause an event if any other change |
2022 | * bits are still set. When an event occurs, switch over to |
2023 | * polling to avoid losing status changes. |
2024 | */ |
2025 | xhci_dbg(xhci, "%s: starting usb%d port polling.\n" , |
2026 | __func__, hcd->self.busnum); |
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
2032 | } |
2033 | |
2034 | /* |
2035 | * This TD is defined by the TRBs starting at start_trb in start_seg and ending |
2036 | * at end_trb, which may be in another segment. If the suspect DMA address is a |
2037 | * TRB in this TD, this function returns that TRB's segment. Otherwise it |
2038 | * returns 0. |
2039 | */ |
2040 | struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, |
2041 | struct xhci_segment *start_seg, |
2042 | union xhci_trb *start_trb, |
2043 | union xhci_trb *end_trb, |
2044 | dma_addr_t suspect_dma, |
2045 | bool debug) |
2046 | { |
2047 | dma_addr_t start_dma; |
2048 | dma_addr_t end_seg_dma; |
2049 | dma_addr_t end_trb_dma; |
2050 | struct xhci_segment *cur_seg; |
2051 | |
	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
2053 | cur_seg = start_seg; |
2054 | |
2055 | do { |
2056 | if (start_dma == 0) |
2057 | return NULL; |
2058 | /* We may get an event for a Link TRB in the middle of a TD */ |
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
2063 | |
2064 | if (debug) |
2065 | xhci_warn(xhci, |
2066 | "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n" , |
2067 | (unsigned long long)suspect_dma, |
2068 | (unsigned long long)start_dma, |
2069 | (unsigned long long)end_trb_dma, |
2070 | (unsigned long long)cur_seg->dma, |
2071 | (unsigned long long)end_seg_dma); |
2072 | |
2073 | if (end_trb_dma > 0) { |
2074 | /* The end TRB is in this segment, so suspect should be here */ |
2075 | if (start_dma <= end_trb_dma) { |
2076 | if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) |
2077 | return cur_seg; |
2078 | } else { |
2079 | /* Case for one segment with |
2080 | * a TD wrapped around to the top |
2081 | */ |
2082 | if ((suspect_dma >= start_dma && |
2083 | suspect_dma <= end_seg_dma) || |
2084 | (suspect_dma >= cur_seg->dma && |
2085 | suspect_dma <= end_trb_dma)) |
2086 | return cur_seg; |
2087 | } |
2088 | return NULL; |
2089 | } else { |
2090 | /* Might still be somewhere in this segment */ |
2091 | if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) |
2092 | return cur_seg; |
2093 | } |
2094 | cur_seg = cur_seg->next; |
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
2096 | } while (cur_seg != start_seg); |
2097 | |
2098 | return NULL; |
2099 | } |
2100 | |
2101 | static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td, |
2102 | struct xhci_virt_ep *ep) |
2103 | { |
2104 | /* |
2105 | * As part of low/full-speed endpoint-halt processing |
2106 | * we must clear the TT buffer (USB 2.0 specification 11.17.5). |
2107 | */ |
2108 | if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) && |
2109 | (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) && |
2110 | !(ep->ep_state & EP_CLEARING_TT)) { |
2111 | ep->ep_state |= EP_CLEARING_TT; |
2112 | td->urb->ep->hcpriv = td->urb->dev; |
		if (usb_hub_clear_tt_buffer(td->urb))
2114 | ep->ep_state &= ~EP_CLEARING_TT; |
2115 | } |
2116 | } |
2117 | |
2118 | /* Check if an error has halted the endpoint ring. The class driver will |
2119 | * cleanup the halt for a non-default control endpoint if we indicate a stall. |
2120 | * However, a babble and other errors also halt the endpoint ring, and the class |
2121 | * driver won't clear the halt in that case, so we need to issue a Set Transfer |
2122 | * Ring Dequeue Pointer command manually. |
2123 | */ |
2124 | static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, |
2125 | struct xhci_ep_ctx *ep_ctx, |
2126 | unsigned int trb_comp_code) |
2127 | { |
2128 | /* TRB completion codes that may require a manual halt cleanup */ |
2129 | if (trb_comp_code == COMP_USB_TRANSACTION_ERROR || |
2130 | trb_comp_code == COMP_BABBLE_DETECTED_ERROR || |
2131 | trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR) |
2132 | /* The 0.95 spec says a babbling control endpoint |
2133 | * is not halted. The 0.96 spec says it is. Some HW |
2134 | * claims to be 0.95 compliant, but it halts the control |
2135 | * endpoint anyway. Check if a babble halted the |
2136 | * endpoint. |
2137 | */ |
2138 | if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) |
2139 | return 1; |
2140 | |
2141 | return 0; |
2142 | } |
2143 | |
2144 | int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) |
2145 | { |
2146 | if (trb_comp_code >= 224 && trb_comp_code <= 255) { |
2147 | /* Vendor defined "informational" completion code, |
2148 | * treat as not-an-error. |
2149 | */ |
2150 | xhci_dbg(xhci, "Vendor defined info completion code %u\n" , |
2151 | trb_comp_code); |
2152 | xhci_dbg(xhci, "Treating code as success.\n" ); |
2153 | return 1; |
2154 | } |
2155 | return 0; |
2156 | } |
2157 | |
2158 | static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, |
2159 | struct xhci_ring *ep_ring, struct xhci_td *td, |
2160 | u32 trb_comp_code) |
2161 | { |
2162 | struct xhci_ep_ctx *ep_ctx; |
2163 | |
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2165 | |
2166 | switch (trb_comp_code) { |
2167 | case COMP_STOPPED_LENGTH_INVALID: |
2168 | case COMP_STOPPED_SHORT_PACKET: |
2169 | case COMP_STOPPED: |
2170 | /* |
2171 | * The "Stop Endpoint" completion will take care of any |
2172 | * stopped TDs. A stopped TD may be restarted, so don't update |
2173 | * the ring dequeue pointer or take this TD off any lists yet. |
2174 | */ |
2175 | return 0; |
2176 | case COMP_USB_TRANSACTION_ERROR: |
2177 | case COMP_BABBLE_DETECTED_ERROR: |
2178 | case COMP_SPLIT_TRANSACTION_ERROR: |
2179 | /* |
2180 | * If endpoint context state is not halted we might be |
		 * racing with a reset endpoint command issued by an unsuccessful
2182 | * stop endpoint completion (context error). In that case the |
2183 | * td should be on the cancelled list, and EP_HALTED flag set. |
2184 | * |
2185 | * Or then it's not halted due to the 0.95 spec stating that a |
2186 | * babbling control endpoint should not halt. The 0.96 spec |
2187 | * again says it should. Some HW claims to be 0.95 compliant, |
2188 | * but it halts the control endpoint anyway. |
2189 | */ |
2190 | if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) { |
2191 | /* |
2192 | * If EP_HALTED is set and TD is on the cancelled list |
2193 | * the TD and dequeue pointer will be handled by reset |
2194 | * ep command completion |
2195 | */ |
2196 | if ((ep->ep_state & EP_HALTED) && |
			    !list_empty(&td->cancelled_td_list)) {
				xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
2199 | (unsigned long long)xhci_trb_virt_to_dma( |
2200 | td->start_seg, td->first_trb)); |
2201 | return 0; |
2202 | } |
2203 | /* endpoint not halted, don't reset it */ |
2204 | break; |
2205 | } |
2206 | /* Almost same procedure as for STALL_ERROR below */ |
2207 | xhci_clear_hub_tt_buffer(xhci, td, ep); |
		xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
2209 | return 0; |
2210 | case COMP_STALL_ERROR: |
2211 | /* |
2212 | * xhci internal endpoint state will go to a "halt" state for |
2213 | * any stall, including default control pipe protocol stall. |
2214 | * To clear the host side halt we need to issue a reset endpoint |
2215 | * command, followed by a set dequeue command to move past the |
2216 | * TD. |
2217 | * Class drivers clear the device side halt from a functional |
2218 | * stall later. Hub TT buffer should only be cleared for FS/LS |
2219 | * devices behind HS hubs for functional stalls. |
2220 | */ |
2221 | if (ep->ep_index != 0) |
2222 | xhci_clear_hub_tt_buffer(xhci, td, ep); |
2223 | |
		xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
2225 | |
2226 | return 0; /* xhci_handle_halted_endpoint marked td cancelled */ |
2227 | default: |
2228 | break; |
2229 | } |
2230 | |
2231 | /* Update ring dequeue pointer */ |
2232 | ep_ring->dequeue = td->last_trb; |
2233 | ep_ring->deq_seg = td->last_trb_seg; |
	inc_deq(xhci, ep_ring);

	return xhci_td_cleanup(xhci, td, ep_ring, td->status);
2237 | } |
2238 | |
2239 | /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ |
2240 | static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, |
2241 | union xhci_trb *stop_trb) |
2242 | { |
2243 | u32 sum; |
2244 | union xhci_trb *trb = ring->dequeue; |
2245 | struct xhci_segment *seg = ring->deq_seg; |
2246 | |
	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
2248 | if (!trb_is_noop(trb) && !trb_is_link(trb)) |
2249 | sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); |
2250 | } |
2251 | return sum; |
2252 | } |
2253 | |
2254 | /* |
2255 | * Process control tds, update urb status and actual_length. |
2256 | */ |
2257 | static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, |
2258 | struct xhci_ring *ep_ring, struct xhci_td *td, |
2259 | union xhci_trb *ep_trb, struct xhci_transfer_event *event) |
2260 | { |
2261 | struct xhci_ep_ctx *ep_ctx; |
2262 | u32 trb_comp_code; |
2263 | u32 remaining, requested; |
2264 | u32 trb_type; |
2265 | |
2266 | trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3])); |
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2268 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
2269 | requested = td->urb->transfer_buffer_length; |
2270 | remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
2271 | |
2272 | switch (trb_comp_code) { |
2273 | case COMP_SUCCESS: |
2274 | if (trb_type != TRB_STATUS) { |
2275 | xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n" , |
2276 | (trb_type == TRB_DATA) ? "data" : "setup" ); |
2277 | td->status = -ESHUTDOWN; |
2278 | break; |
2279 | } |
2280 | td->status = 0; |
2281 | break; |
2282 | case COMP_SHORT_PACKET: |
2283 | td->status = 0; |
2284 | break; |
2285 | case COMP_STOPPED_SHORT_PACKET: |
2286 | if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) |
2287 | td->urb->actual_length = remaining; |
2288 | else |
2289 | xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n" ); |
2290 | goto finish_td; |
2291 | case COMP_STOPPED: |
2292 | switch (trb_type) { |
2293 | case TRB_SETUP: |
2294 | td->urb->actual_length = 0; |
2295 | goto finish_td; |
2296 | case TRB_DATA: |
2297 | case TRB_NORMAL: |
2298 | td->urb->actual_length = requested - remaining; |
2299 | goto finish_td; |
2300 | case TRB_STATUS: |
2301 | td->urb->actual_length = requested; |
2302 | goto finish_td; |
2303 | default: |
2304 | xhci_warn(xhci, "WARN: unexpected TRB Type %d\n" , |
2305 | trb_type); |
2306 | goto finish_td; |
2307 | } |
2308 | case COMP_STOPPED_LENGTH_INVALID: |
2309 | goto finish_td; |
2310 | default: |
2311 | if (!xhci_requires_manual_halt_cleanup(xhci, |
2312 | ep_ctx, trb_comp_code)) |
2313 | break; |
2314 | xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n" , |
2315 | trb_comp_code, ep->ep_index); |
2316 | fallthrough; |
2317 | case COMP_STALL_ERROR: |
2318 | /* Did we transfer part of the data (middle) phase? */ |
2319 | if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) |
2320 | td->urb->actual_length = requested - remaining; |
2321 | else if (!td->urb_length_set) |
2322 | td->urb->actual_length = 0; |
2323 | goto finish_td; |
2324 | } |
2325 | |
2326 | /* stopped at setup stage, no data transferred */ |
2327 | if (trb_type == TRB_SETUP) |
2328 | goto finish_td; |
2329 | |
2330 | /* |
2331 | * if on data stage then update the actual_length of the URB and flag it |
2332 | * as set, so it won't be overwritten in the event for the last TRB. |
2333 | */ |
2334 | if (trb_type == TRB_DATA || |
2335 | trb_type == TRB_NORMAL) { |
2336 | td->urb_length_set = true; |
2337 | td->urb->actual_length = requested - remaining; |
2338 | xhci_dbg(xhci, "Waiting for status stage event\n" ); |
2339 | return 0; |
2340 | } |
2341 | |
2342 | /* at status stage */ |
2343 | if (!td->urb_length_set) |
2344 | td->urb->actual_length = requested; |
2345 | |
2346 | finish_td: |
2347 | return finish_td(xhci, ep, ep_ring, td, trb_comp_code); |
2348 | } |
2349 | |
2350 | /* |
2351 | * Process isochronous tds, update urb packet status and actual_length. |
2352 | */ |
2353 | static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, |
2354 | struct xhci_ring *ep_ring, struct xhci_td *td, |
2355 | union xhci_trb *ep_trb, struct xhci_transfer_event *event) |
2356 | { |
2357 | struct urb_priv *urb_priv; |
2358 | int idx; |
2359 | struct usb_iso_packet_descriptor *frame; |
2360 | u32 trb_comp_code; |
2361 | bool sum_trbs_for_length = false; |
2362 | u32 remaining, requested, ep_trb_len; |
2363 | int short_framestatus; |
2364 | |
2365 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
2366 | urb_priv = td->urb->hcpriv; |
2367 | idx = urb_priv->num_tds_done; |
2368 | frame = &td->urb->iso_frame_desc[idx]; |
2369 | requested = frame->length; |
2370 | remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
2371 | ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); |
2372 | short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? |
2373 | -EREMOTEIO : 0; |
2374 | |
2375 | /* handle completion code */ |
2376 | switch (trb_comp_code) { |
2377 | case COMP_SUCCESS: |
2378 | if (remaining) { |
2379 | frame->status = short_framestatus; |
2380 | if (xhci->quirks & XHCI_TRUST_TX_LENGTH) |
2381 | sum_trbs_for_length = true; |
2382 | break; |
2383 | } |
2384 | frame->status = 0; |
2385 | break; |
2386 | case COMP_SHORT_PACKET: |
2387 | frame->status = short_framestatus; |
2388 | sum_trbs_for_length = true; |
2389 | break; |
2390 | case COMP_BANDWIDTH_OVERRUN_ERROR: |
2391 | frame->status = -ECOMM; |
2392 | break; |
2393 | case COMP_ISOCH_BUFFER_OVERRUN: |
2394 | case COMP_BABBLE_DETECTED_ERROR: |
2395 | frame->status = -EOVERFLOW; |
2396 | break; |
2397 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
2398 | case COMP_STALL_ERROR: |
2399 | frame->status = -EPROTO; |
2400 | break; |
2401 | case COMP_USB_TRANSACTION_ERROR: |
2402 | frame->status = -EPROTO; |
2403 | if (ep_trb != td->last_trb) |
2404 | return 0; |
2405 | break; |
2406 | case COMP_STOPPED: |
2407 | sum_trbs_for_length = true; |
2408 | break; |
2409 | case COMP_STOPPED_SHORT_PACKET: |
		/* field normally containing residue now contains transferred */
2411 | frame->status = short_framestatus; |
2412 | requested = remaining; |
2413 | break; |
2414 | case COMP_STOPPED_LENGTH_INVALID: |
2415 | requested = 0; |
2416 | remaining = 0; |
2417 | break; |
2418 | default: |
2419 | sum_trbs_for_length = true; |
2420 | frame->status = -1; |
2421 | break; |
2422 | } |
2423 | |
2424 | if (sum_trbs_for_length) |
		frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
			ep_trb_len - remaining;
2427 | else |
2428 | frame->actual_length = requested; |
2429 | |
2430 | td->urb->actual_length += frame->actual_length; |
2431 | |
2432 | return finish_td(xhci, ep, ep_ring, td, trb_comp_code); |
2433 | } |
2434 | |
2435 | static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, |
2436 | struct xhci_virt_ep *ep, int status) |
2437 | { |
2438 | struct urb_priv *urb_priv; |
2439 | struct usb_iso_packet_descriptor *frame; |
2440 | int idx; |
2441 | |
2442 | urb_priv = td->urb->hcpriv; |
2443 | idx = urb_priv->num_tds_done; |
2444 | frame = &td->urb->iso_frame_desc[idx]; |
2445 | |
2446 | /* The transfer is partly done. */ |
2447 | frame->status = -EXDEV; |
2448 | |
2449 | /* calc actual length */ |
2450 | frame->actual_length = 0; |
2451 | |
2452 | /* Update ring dequeue pointer */ |
2453 | ep->ring->dequeue = td->last_trb; |
2454 | ep->ring->deq_seg = td->last_trb_seg; |
	inc_deq(xhci, ep->ring);

	return xhci_td_cleanup(xhci, td, ep->ring, status);
2458 | } |
2459 | |
2460 | /* |
2461 | * Process bulk and interrupt tds, update urb status and actual_length. |
2462 | */ |
2463 | static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, |
2464 | struct xhci_ring *ep_ring, struct xhci_td *td, |
2465 | union xhci_trb *ep_trb, struct xhci_transfer_event *event) |
2466 | { |
2467 | struct xhci_slot_ctx *slot_ctx; |
2468 | u32 trb_comp_code; |
2469 | u32 remaining, requested, ep_trb_len; |
2470 | |
	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
2472 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
2473 | remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
2474 | ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); |
2475 | requested = td->urb->transfer_buffer_length; |
2476 | |
2477 | switch (trb_comp_code) { |
2478 | case COMP_SUCCESS: |
2479 | ep->err_count = 0; |
2480 | /* handle success with untransferred data as short packet */ |
2481 | if (ep_trb != td->last_trb || remaining) { |
2482 | xhci_warn(xhci, "WARN Successful completion on short TX\n" ); |
2483 | xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n" , |
2484 | td->urb->ep->desc.bEndpointAddress, |
2485 | requested, remaining); |
2486 | } |
2487 | td->status = 0; |
2488 | break; |
2489 | case COMP_SHORT_PACKET: |
2490 | xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n" , |
2491 | td->urb->ep->desc.bEndpointAddress, |
2492 | requested, remaining); |
2493 | td->status = 0; |
2494 | break; |
2495 | case COMP_STOPPED_SHORT_PACKET: |
2496 | td->urb->actual_length = remaining; |
2497 | goto finish_td; |
2498 | case COMP_STOPPED_LENGTH_INVALID: |
2499 | /* stopped on ep trb with invalid length, exclude it */ |
2500 | ep_trb_len = 0; |
2501 | remaining = 0; |
2502 | break; |
2503 | case COMP_USB_TRANSACTION_ERROR: |
2504 | if (xhci->quirks & XHCI_NO_SOFT_RETRY || |
2505 | (ep->err_count++ > MAX_SOFT_RETRY) || |
2506 | le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) |
2507 | break; |
2508 | |
2509 | td->status = 0; |
2510 | |
		xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
2512 | return 0; |
2513 | default: |
2514 | /* do nothing */ |
2515 | break; |
2516 | } |
2517 | |
2518 | if (ep_trb == td->last_trb) |
2519 | td->urb->actual_length = requested - remaining; |
2520 | else |
		td->urb->actual_length =
			sum_trb_lengths(xhci, ep_ring, ep_trb) +
			ep_trb_len - remaining;
2524 | finish_td: |
2525 | if (remaining > requested) { |
2526 | xhci_warn(xhci, "bad transfer trb length %d in event trb\n" , |
2527 | remaining); |
2528 | td->urb->actual_length = 0; |
2529 | } |
2530 | |
2531 | return finish_td(xhci, ep, ep_ring, td, trb_comp_code); |
2532 | } |
2533 | |
2534 | /* |
2535 | * If this function returns an error condition, it means it got a Transfer |
2536 | * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. |
2537 | * At this point, the host controller is probably hosed and should be reset. |
2538 | */ |
2539 | static int handle_tx_event(struct xhci_hcd *xhci, |
2540 | struct xhci_interrupter *ir, |
2541 | struct xhci_transfer_event *event) |
2542 | { |
2543 | struct xhci_virt_ep *ep; |
2544 | struct xhci_ring *ep_ring; |
2545 | unsigned int slot_id; |
2546 | int ep_index; |
2547 | struct xhci_td *td = NULL; |
2548 | dma_addr_t ep_trb_dma; |
2549 | struct xhci_segment *ep_seg; |
2550 | union xhci_trb *ep_trb; |
2551 | int status = -EINPROGRESS; |
2552 | struct xhci_ep_ctx *ep_ctx; |
2553 | u32 trb_comp_code; |
2554 | int td_num = 0; |
2555 | bool handling_skipped_tds = false; |
2556 | |
2557 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); |
2558 | ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; |
2559 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
2560 | ep_trb_dma = le64_to_cpu(event->buffer); |
2561 | |
2562 | ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
2563 | if (!ep) { |
2564 | xhci_err(xhci, "ERROR Invalid Transfer event\n" ); |
2565 | goto err_out; |
2566 | } |
2567 | |
	ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
2570 | |
2571 | if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) { |
2572 | xhci_err(xhci, |
2573 | "ERROR Transfer event for disabled endpoint slot %u ep %u\n" , |
2574 | slot_id, ep_index); |
2575 | goto err_out; |
2576 | } |
2577 | |
2578 | /* Some transfer events don't always point to a trb, see xhci 4.17.4 */ |
2579 | if (!ep_ring) { |
2580 | switch (trb_comp_code) { |
2581 | case COMP_STALL_ERROR: |
2582 | case COMP_USB_TRANSACTION_ERROR: |
2583 | case COMP_INVALID_STREAM_TYPE_ERROR: |
2584 | case COMP_INVALID_STREAM_ID_ERROR: |
2585 | xhci_dbg(xhci, "Stream transaction error ep %u no id\n" , |
2586 | ep_index); |
2587 | if (ep->err_count++ > MAX_SOFT_RETRY) |
				xhci_handle_halted_endpoint(xhci, ep, NULL,
							    EP_HARD_RESET);
			else
				xhci_handle_halted_endpoint(xhci, ep, NULL,
							    EP_SOFT_RESET);
2593 | goto cleanup; |
2594 | case COMP_RING_UNDERRUN: |
2595 | case COMP_RING_OVERRUN: |
2596 | case COMP_STOPPED_LENGTH_INVALID: |
2597 | goto cleanup; |
2598 | default: |
2599 | xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n" , |
2600 | slot_id, ep_index); |
2601 | goto err_out; |
2602 | } |
2603 | } |
2604 | |
2605 | /* Count current td numbers if ep->skip is set */ |
2606 | if (ep->skip) |
		td_num += list_count_nodes(&ep_ring->td_list);
2608 | |
2609 | /* Look for common error cases */ |
2610 | switch (trb_comp_code) { |
2611 | /* Skip codes that require special handling depending on |
2612 | * transfer type |
2613 | */ |
2614 | case COMP_SUCCESS: |
2615 | if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) |
2616 | break; |
2617 | if (xhci->quirks & XHCI_TRUST_TX_LENGTH || |
2618 | ep_ring->last_td_was_short) |
2619 | trb_comp_code = COMP_SHORT_PACKET; |
2620 | else |
2621 | xhci_warn_ratelimited(xhci, |
2622 | "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n" , |
2623 | slot_id, ep_index); |
2624 | break; |
2625 | case COMP_SHORT_PACKET: |
2626 | break; |
2627 | /* Completion codes for endpoint stopped state */ |
2628 | case COMP_STOPPED: |
2629 | xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n" , |
2630 | slot_id, ep_index); |
2631 | break; |
2632 | case COMP_STOPPED_LENGTH_INVALID: |
2633 | xhci_dbg(xhci, |
2634 | "Stopped on No-op or Link TRB for slot %u ep %u\n" , |
2635 | slot_id, ep_index); |
2636 | break; |
2637 | case COMP_STOPPED_SHORT_PACKET: |
2638 | xhci_dbg(xhci, |
2639 | "Stopped with short packet transfer detected for slot %u ep %u\n" , |
2640 | slot_id, ep_index); |
2641 | break; |
2642 | /* Completion codes for endpoint halted state */ |
2643 | case COMP_STALL_ERROR: |
2644 | xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n" , slot_id, |
2645 | ep_index); |
2646 | status = -EPIPE; |
2647 | break; |
2648 | case COMP_SPLIT_TRANSACTION_ERROR: |
2649 | xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n" , |
2650 | slot_id, ep_index); |
2651 | status = -EPROTO; |
2652 | break; |
2653 | case COMP_USB_TRANSACTION_ERROR: |
2654 | xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n" , |
2655 | slot_id, ep_index); |
2656 | status = -EPROTO; |
2657 | break; |
2658 | case COMP_BABBLE_DETECTED_ERROR: |
2659 | xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n" , |
2660 | slot_id, ep_index); |
2661 | status = -EOVERFLOW; |
2662 | break; |
2663 | /* Completion codes for endpoint error state */ |
2664 | case COMP_TRB_ERROR: |
2665 | xhci_warn(xhci, |
2666 | "WARN: TRB error for slot %u ep %u on endpoint\n" , |
2667 | slot_id, ep_index); |
2668 | status = -EILSEQ; |
2669 | break; |
2670 | /* completion codes not indicating endpoint state change */ |
2671 | case COMP_DATA_BUFFER_ERROR: |
2672 | xhci_warn(xhci, |
2673 | "WARN: HC couldn't access mem fast enough for slot %u ep %u\n" , |
2674 | slot_id, ep_index); |
2675 | status = -ENOSR; |
2676 | break; |
2677 | case COMP_BANDWIDTH_OVERRUN_ERROR: |
2678 | xhci_warn(xhci, |
2679 | "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n" , |
2680 | slot_id, ep_index); |
2681 | break; |
2682 | case COMP_ISOCH_BUFFER_OVERRUN: |
2683 | xhci_warn(xhci, |
2684 | "WARN: buffer overrun event for slot %u ep %u on endpoint" , |
2685 | slot_id, ep_index); |
2686 | break; |
2687 | case COMP_RING_UNDERRUN: |
2688 | /* |
2689 | * When the Isoch ring is empty, the xHC will generate |
2690 | * a Ring Overrun Event for IN Isoch endpoint or Ring |
2691 | * Underrun Event for OUT Isoch endpoint. |
2692 | */ |
2693 | xhci_dbg(xhci, "underrun event on endpoint\n" ); |
		if (!list_empty(&ep_ring->td_list))
2695 | xhci_dbg(xhci, "Underrun Event for slot %d ep %d " |
2696 | "still with TDs queued?\n" , |
2697 | TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), |
2698 | ep_index); |
2699 | goto cleanup; |
2700 | case COMP_RING_OVERRUN: |
2701 | xhci_dbg(xhci, "overrun event on endpoint\n" ); |
		if (!list_empty(&ep_ring->td_list))
2703 | xhci_dbg(xhci, "Overrun Event for slot %d ep %d " |
2704 | "still with TDs queued?\n" , |
2705 | TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), |
2706 | ep_index); |
2707 | goto cleanup; |
2708 | case COMP_MISSED_SERVICE_ERROR: |
2709 | /* |
2710 | * When encounter missed service error, one or more isoc tds |
2711 | * may be missed by xHC. |
2712 | * Set skip flag of the ep_ring; Complete the missed tds as |
2713 | * short transfer when process the ep_ring next time. |
2714 | */ |
2715 | ep->skip = true; |
2716 | xhci_dbg(xhci, |
2717 | "Miss service interval error for slot %u ep %u, set skip flag\n" , |
2718 | slot_id, ep_index); |
2719 | goto cleanup; |
2720 | case COMP_NO_PING_RESPONSE_ERROR: |
2721 | ep->skip = true; |
2722 | xhci_dbg(xhci, |
2723 | "No Ping response error for slot %u ep %u, Skip one Isoc TD\n" , |
2724 | slot_id, ep_index); |
2725 | goto cleanup; |
2726 | |
2727 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
2728 | /* needs disable slot command to recover */ |
2729 | xhci_warn(xhci, |
2730 | "WARN: detect an incompatible device for slot %u ep %u" , |
2731 | slot_id, ep_index); |
2732 | status = -EPROTO; |
2733 | break; |
2734 | default: |
2735 | if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { |
2736 | status = 0; |
2737 | break; |
2738 | } |
2739 | xhci_warn(xhci, |
2740 | "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n" , |
2741 | trb_comp_code, slot_id, ep_index); |
2742 | goto cleanup; |
2743 | } |
2744 | |
2745 | do { |
2746 | /* This TRB should be in the TD at the head of this ring's |
2747 | * TD list. |
2748 | */ |
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * Don't print warnings if this is due to a stopped
			 * endpoint generating an extra completion event while
			 * the device was suspended, or an event for the last
			 * TRB of a short TD we already got a short event for.
			 * The short TD is already removed from the TD list.
			 */
2757 | |
2758 | if (!(trb_comp_code == COMP_STOPPED || |
2759 | trb_comp_code == COMP_STOPPED_LENGTH_INVALID || |
2760 | ep_ring->last_td_was_short)) { |
2761 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n" , |
2762 | TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), |
2763 | ep_index); |
2764 | } |
2765 | if (ep->skip) { |
2766 | ep->skip = false; |
2767 | xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n" , |
2768 | slot_id, ep_index); |
2769 | } |
2770 | if (trb_comp_code == COMP_STALL_ERROR || |
2771 | xhci_requires_manual_halt_cleanup(xhci, ep_ctx, |
2772 | trb_comp_code)) { |
				xhci_handle_halted_endpoint(xhci, ep, NULL,
							    EP_HARD_RESET);
2775 | } |
2776 | goto cleanup; |
2777 | } |
2778 | |
2779 | /* We've skipped all the TDs on the ep ring when ep->skip set */ |
2780 | if (ep->skip && td_num == 0) { |
2781 | ep->skip = false; |
2782 | xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n" , |
2783 | slot_id, ep_index); |
2784 | goto cleanup; |
2785 | } |
2786 | |
2787 | td = list_first_entry(&ep_ring->td_list, struct xhci_td, |
2788 | td_list); |
2789 | if (ep->skip) |
2790 | td_num--; |
2791 | |
2792 | /* Is this a TRB in the currently executing TD? */ |
		ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
				   td->last_trb, ep_trb_dma, false);
2795 | |
2796 | /* |
		 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
		 * is not in the current TD pointed to by ep_ring->dequeue,
		 * because the hardware dequeue pointer is still at the TRB
		 * preceding the current TD. That previous TRB may be a Link TRB
		 * or the last TRB of the previous TD. The command completion
		 * handler will take care of the rest.
2803 | */ |
2804 | if (!ep_seg && (trb_comp_code == COMP_STOPPED || |
2805 | trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { |
2806 | goto cleanup; |
2807 | } |
2808 | |
2809 | if (!ep_seg) { |
2810 | if (!ep->skip || |
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2812 | /* Some host controllers give a spurious |
2813 | * successful event after a short transfer. |
2814 | * Ignore it. |
2815 | */ |
2816 | if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && |
2817 | ep_ring->last_td_was_short) { |
2818 | ep_ring->last_td_was_short = false; |
2819 | goto cleanup; |
2820 | } |
2821 | /* HC is busted, give up! */ |
2822 | xhci_err(xhci, |
2823 | "ERROR Transfer event TRB DMA ptr not " |
2824 | "part of current TD ep_index %d " |
2825 | "comp_code %u\n" , ep_index, |
2826 | trb_comp_code); |
				trb_in_td(xhci, ep_ring->deq_seg,
					  ep_ring->dequeue, td->last_trb,
					  ep_trb_dma, true);
2830 | return -ESHUTDOWN; |
2831 | } |
2832 | |
2833 | skip_isoc_td(xhci, td, ep, status); |
2834 | goto cleanup; |
2835 | } |
2836 | if (trb_comp_code == COMP_SHORT_PACKET) |
2837 | ep_ring->last_td_was_short = true; |
2838 | else |
2839 | ep_ring->last_td_was_short = false; |
2840 | |
2841 | if (ep->skip) { |
2842 | xhci_dbg(xhci, |
2843 | "Found td. Clear skip flag for slot %u ep %u.\n" , |
2844 | slot_id, ep_index); |
2845 | ep->skip = false; |
2846 | } |
2847 | |
2848 | ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / |
2849 | sizeof(*ep_trb)]; |
2850 | |
		trace_xhci_handle_transfer(ep_ring,
				(struct xhci_generic_trb *) ep_trb);
2853 | |
2854 | /* |
2855 | * No-op TRB could trigger interrupts in a case where |
2856 | * a URB was killed and a STALL_ERROR happens right |
2857 | * after the endpoint ring stopped. Reset the halted |
2858 | * endpoint. Otherwise, the endpoint remains stalled |
2859 | * indefinitely. |
2860 | */ |
2861 | |
		if (trb_is_noop(ep_trb)) {
2863 | if (trb_comp_code == COMP_STALL_ERROR || |
2864 | xhci_requires_manual_halt_cleanup(xhci, ep_ctx, |
2865 | trb_comp_code)) |
				xhci_handle_halted_endpoint(xhci, ep, td,
							    EP_HARD_RESET);
2868 | goto cleanup; |
2869 | } |
2870 | |
2871 | td->status = status; |
2872 | |
2873 | /* update the urb's actual_length and give back to the core */ |
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2877 | process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event); |
2878 | else |
2879 | process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event); |
2880 | cleanup: |
2881 | handling_skipped_tds = ep->skip && |
2882 | trb_comp_code != COMP_MISSED_SERVICE_ERROR && |
2883 | trb_comp_code != COMP_NO_PING_RESPONSE_ERROR; |
2884 | |
2885 | /* |
		 * If ep->skip is set, there are missed TDs on the endpoint
		 * ring that still need to be taken care of. Process them as
		 * short transfers until we reach the TD pointed to by the
		 * event.
2890 | */ |
2891 | } while (handling_skipped_tds); |
2892 | |
2893 | return 0; |
2894 | |
2895 | err_out: |
2896 | xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n" , |
2897 | (unsigned long long) xhci_trb_virt_to_dma( |
2898 | ir->event_ring->deq_seg, |
2899 | ir->event_ring->dequeue), |
2900 | lower_32_bits(le64_to_cpu(event->buffer)), |
2901 | upper_32_bits(le64_to_cpu(event->buffer)), |
2902 | le32_to_cpu(event->transfer_len), |
2903 | le32_to_cpu(event->flags)); |
2904 | return -ENODEV; |
2905 | } |
2906 | |
2907 | /* |
2908 | * This function handles all OS-owned events on the event ring. It may drop |
2909 | * xhci->lock between event processing (e.g. to pass up port status changes). |
2910 | * Returns >0 for "possibly more events to process" (caller should call again), |
2911 | * otherwise 0 if done. In future, <0 returns should indicate error code. |
2912 | */ |
2913 | static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir) |
2914 | { |
2915 | union xhci_trb *event; |
2916 | u32 trb_type; |
2917 | |
2918 | /* Event ring hasn't been allocated yet. */ |
2919 | if (!ir || !ir->event_ring || !ir->event_ring->dequeue) { |
2920 | xhci_err(xhci, "ERROR interrupter not ready\n" ); |
2921 | return -ENOMEM; |
2922 | } |
2923 | |
2924 | event = ir->event_ring->dequeue; |
2925 | /* Does the HC or OS own the TRB? */ |
2926 | if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != |
2927 | ir->event_ring->cycle_state) |
2928 | return 0; |
2929 | |
	trace_xhci_handle_event(ir->event_ring, &event->generic);
2931 | |
2932 | /* |
2933 | * Barrier between reading the TRB_CYCLE (valid) flag above and any |
2934 | * speculative reads of the event's flags/data below. |
2935 | */ |
2936 | rmb(); |
2937 | trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags)); |
2938 | /* FIXME: Handle more event types. */ |
2939 | |
2940 | switch (trb_type) { |
2941 | case TRB_COMPLETION: |
		handle_cmd_completion(xhci, &event->event_cmd);
2943 | break; |
2944 | case TRB_PORT_STATUS: |
2945 | handle_port_status(xhci, ir, event); |
2946 | break; |
2947 | case TRB_TRANSFER: |
		handle_tx_event(xhci, ir, &event->trans_event);
2949 | break; |
2950 | case TRB_DEV_NOTE: |
2951 | handle_device_notification(xhci, event); |
2952 | break; |
2953 | default: |
2954 | if (trb_type >= TRB_VENDOR_DEFINED_LOW) |
2955 | handle_vendor_event(xhci, event, trb_type); |
2956 | else |
2957 | xhci_warn(xhci, "ERROR unknown event type %d\n" , trb_type); |
2958 | } |
2959 | /* Any of the above functions may drop and re-acquire the lock, so check |
2960 | * to make sure a watchdog timer didn't mark the host as non-responsive. |
2961 | */ |
2962 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
2963 | xhci_dbg(xhci, "xHCI host dying, returning from " |
2964 | "event handler.\n" ); |
2965 | return 0; |
2966 | } |
2967 | |
2968 | /* Update SW event ring dequeue pointer */ |
	inc_deq(xhci, ir->event_ring);
2970 | |
2971 | /* Are there more items on the event ring? Caller will call us again to |
2972 | * check. |
2973 | */ |
2974 | return 1; |
2975 | } |
2976 | |
2977 | /* |
2978 | * Update Event Ring Dequeue Pointer: |
2979 | * - When all events have finished |
2980 | * - To avoid "Event Ring Full Error" condition |
2981 | */ |
2982 | static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, |
2983 | struct xhci_interrupter *ir, |
2984 | union xhci_trb *event_ring_deq, |
2985 | bool clear_ehb) |
2986 | { |
2987 | u64 temp_64; |
2988 | dma_addr_t deq; |
2989 | |
	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
2991 | /* If necessary, update the HW's version of the event ring deq ptr. */ |
2992 | if (event_ring_deq != ir->event_ring->dequeue) { |
		deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
					   ir->event_ring->dequeue);
2995 | if (deq == 0) |
2996 | xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n" ); |
2997 | /* |
2998 | * Per 4.9.4, Software writes to the ERDP register shall |
2999 | * always advance the Event Ring Dequeue Pointer value. |
3000 | */ |
3001 | if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK)) |
3002 | return; |
3003 | |
3004 | /* Update HC event ring dequeue pointer */ |
3005 | temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK; |
3006 | temp_64 |= deq & ERST_PTR_MASK; |
3007 | } |
3008 | |
3009 | /* Clear the event handler busy flag (RW1C) */ |
3010 | if (clear_ehb) |
3011 | temp_64 |= ERST_EHB; |
	xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
3013 | } |
3014 | |
3015 | /* |
3016 | * xHCI spec says we can get an interrupt, and if the HC has an error condition, |
3017 | * we might get bad data out of the event ring. Section 4.10.2.7 has a list of |
3018 | * indicators of an event TRB error, but we check the status *first* to be safe. |
3019 | */ |
3020 | irqreturn_t xhci_irq(struct usb_hcd *hcd) |
3021 | { |
3022 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
3023 | union xhci_trb *event_ring_deq; |
3024 | struct xhci_interrupter *ir; |
3025 | irqreturn_t ret = IRQ_NONE; |
3026 | u64 temp_64; |
3027 | u32 status; |
3028 | int event_loop = 0; |
3029 | |
	spin_lock(&xhci->lock);
3031 | /* Check if the xHC generated the interrupt, or the irq is shared */ |
	status = readl(&xhci->op_regs->status);
3033 | if (status == ~(u32)0) { |
3034 | xhci_hc_died(xhci); |
3035 | ret = IRQ_HANDLED; |
3036 | goto out; |
3037 | } |
3038 | |
3039 | if (!(status & STS_EINT)) |
3040 | goto out; |
3041 | |
3042 | if (status & STS_HCE) { |
		xhci_warn(xhci, "WARNING: Host Controller Error\n");
3044 | goto out; |
3045 | } |
3046 | |
3047 | if (status & STS_FATAL) { |
		xhci_warn(xhci, "WARNING: Host System Error\n");
3049 | xhci_halt(xhci); |
3050 | ret = IRQ_HANDLED; |
3051 | goto out; |
3052 | } |
3053 | |
3054 | /* |
3055 | * Clear the op reg interrupt status first, |
3056 | * so we can receive interrupts from other MSI-X interrupters. |
3057 | * Write 1 to clear the interrupt status. |
3058 | */ |
3059 | status |= STS_EINT; |
	writel(status, &xhci->op_regs->status);
3061 | |
3062 | /* This is the handler of the primary interrupter */ |
3063 | ir = xhci->interrupter; |
3064 | if (!hcd->msi_enabled) { |
3065 | u32 irq_pending; |
		irq_pending = readl(&ir->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		writel(irq_pending, &ir->ir_set->irq_pending);
3069 | } |
3070 | |
3071 | if (xhci->xhc_state & XHCI_STATE_DYING || |
3072 | xhci->xhc_state & XHCI_STATE_HALTED) { |
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
3075 | /* Clear the event handler busy flag (RW1C); |
3076 | * the event ring should be empty. |
3077 | */ |
		temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
			      &ir->ir_set->erst_dequeue);
3081 | ret = IRQ_HANDLED; |
3082 | goto out; |
3083 | } |
3084 | |
3085 | event_ring_deq = ir->event_ring->dequeue; |
3086 | /* FIXME this should be a delayed service routine |
3087 | * that clears the EHB. |
3088 | */ |
3089 | while (xhci_handle_event(xhci, ir) > 0) { |
3090 | if (event_loop++ < TRBS_PER_SEGMENT / 2) |
3091 | continue; |
		xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false);
3093 | event_ring_deq = ir->event_ring->dequeue; |
3094 | |
3095 | /* ring is half-full, force isoc trbs to interrupt more often */ |
3096 | if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN) |
3097 | xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2; |
3098 | |
3099 | event_loop = 0; |
3100 | } |
3101 | |
	xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true);
3103 | ret = IRQ_HANDLED; |
3104 | |
3105 | out: |
	spin_unlock(&xhci->lock);
3107 | |
3108 | return ret; |
3109 | } |
3110 | |
3111 | irqreturn_t xhci_msi_irq(int irq, void *hcd) |
3112 | { |
3113 | return xhci_irq(hcd); |
3114 | } |
3115 | EXPORT_SYMBOL_GPL(xhci_msi_irq); |
3116 | |
3117 | /**** Endpoint Ring Operations ****/ |
3118 | |
3119 | /* |
3120 | * Generic function for queueing a TRB on a ring. |
3121 | * The caller must have checked to make sure there's room on the ring. |
3122 | * |
3123 | * @more_trbs_coming: Will you enqueue more TRBs before calling |
3124 | * prepare_transfer()? |
3125 | */ |
3126 | static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, |
3127 | bool more_trbs_coming, |
3128 | u32 field1, u32 field2, u32 field3, u32 field4) |
3129 | { |
3130 | struct xhci_generic_trb *trb; |
3131 | |
3132 | trb = &ring->enqueue->generic; |
3133 | trb->field[0] = cpu_to_le32(field1); |
3134 | trb->field[1] = cpu_to_le32(field2); |
3135 | trb->field[2] = cpu_to_le32(field3); |
3136 | /* make sure TRB is fully written before giving it to the controller */ |
3137 | wmb(); |
3138 | trb->field[3] = cpu_to_le32(field4); |
3139 | |
3140 | trace_xhci_queue_trb(ring, trb); |
3141 | |
3142 | inc_enq(xhci, ring, more_trbs_coming); |
3143 | } |
3144 | |
3145 | /* |
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * Expands the ring if it is starting to become full.
3148 | */ |
3149 | static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, |
3150 | u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) |
3151 | { |
3152 | unsigned int link_trb_count = 0; |
3153 | unsigned int new_segs = 0; |
3154 | |
3155 | /* Make sure the endpoint has been added to xHC schedule */ |
3156 | switch (ep_state) { |
3157 | case EP_STATE_DISABLED: |
3158 | /* |
3159 | * USB core changed config/interfaces without notifying us, |
3160 | * or hardware is reporting the wrong state. |
3161 | */ |
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
3163 | return -ENOENT; |
3164 | case EP_STATE_ERROR: |
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
3166 | /* FIXME event handling code for error needs to clear it */ |
3167 | /* XXX not sure if this should be -ENOENT or not */ |
3168 | return -EINVAL; |
3169 | case EP_STATE_HALTED: |
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
3171 | break; |
3172 | case EP_STATE_STOPPED: |
3173 | case EP_STATE_RUNNING: |
3174 | break; |
3175 | default: |
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
3177 | /* |
3178 | * FIXME issue Configure Endpoint command to try to get the HC |
3179 | * back into a known state. |
3180 | */ |
3181 | return -EINVAL; |
3182 | } |
3183 | |
3184 | if (ep_ring != xhci->cmd_ring) { |
		new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs);
	} else if (xhci_num_trbs_free(xhci, ep_ring) <= num_trbs) {
		xhci_err(xhci, "Do not support expand command ring\n");
3188 | return -ENOMEM; |
3189 | } |
3190 | |
3191 | if (new_segs) { |
		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		if (xhci_ring_expansion(xhci, ep_ring, new_segs, mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
3196 | return -ENOMEM; |
3197 | } |
3198 | } |
3199 | |
	while (trb_is_link(ep_ring->enqueue)) {
3201 | /* If we're not dealing with 0.95 hardware or isoc rings |
3202 | * on AMD 0.96 host, clear the chain bit. |
3203 | */ |
3204 | if (!xhci_link_trb_quirk(xhci) && |
3205 | !(ep_ring->type == TYPE_ISOC && |
3206 | (xhci->quirks & XHCI_AMD_0x96_HOST))) |
3207 | ep_ring->enqueue->link.control &= |
3208 | cpu_to_le32(~TRB_CHAIN); |
3209 | else |
3210 | ep_ring->enqueue->link.control |= |
3211 | cpu_to_le32(TRB_CHAIN); |
3212 | |
3213 | wmb(); |
3214 | ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); |
3215 | |
3216 | /* Toggle the cycle bit after the last ring segment. */ |
		if (link_trb_toggles_cycle(ep_ring->enqueue))
3218 | ep_ring->cycle_state ^= 1; |
3219 | |
3220 | ep_ring->enq_seg = ep_ring->enq_seg->next; |
3221 | ep_ring->enqueue = ep_ring->enq_seg->trbs; |
3222 | |
3223 | /* prevent infinite loop if all first trbs are link trbs */ |
3224 | if (link_trb_count++ > ep_ring->num_segs) { |
			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
3226 | return -EINVAL; |
3227 | } |
3228 | } |
3229 | |
	if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
		xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
3232 | return -EINVAL; |
3233 | } |
3234 | |
3235 | return 0; |
3236 | } |
3237 | |
3238 | static int prepare_transfer(struct xhci_hcd *xhci, |
3239 | struct xhci_virt_device *xdev, |
3240 | unsigned int ep_index, |
3241 | unsigned int stream_id, |
3242 | unsigned int num_trbs, |
3243 | struct urb *urb, |
3244 | unsigned int td_index, |
3245 | gfp_t mem_flags) |
3246 | { |
3247 | int ret; |
3248 | struct urb_priv *urb_priv; |
3249 | struct xhci_td *td; |
3250 | struct xhci_ring *ep_ring; |
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3252 | |
	ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
3254 | stream_id); |
3255 | if (!ep_ring) { |
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
3257 | stream_id); |
3258 | return -EINVAL; |
3259 | } |
3260 | |
3261 | ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), |
3262 | num_trbs, mem_flags); |
3263 | if (ret) |
3264 | return ret; |
3265 | |
3266 | urb_priv = urb->hcpriv; |
3267 | td = &urb_priv->td[td_index]; |
3268 | |
	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);
3271 | |
3272 | if (td_index == 0) { |
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
3274 | if (unlikely(ret)) |
3275 | return ret; |
3276 | } |
3277 | |
3278 | td->urb = urb; |
3279 | /* Add this TD to the tail of the endpoint ring's TD list */ |
	list_add_tail(&td->td_list, &ep_ring->td_list);
3281 | td->start_seg = ep_ring->enq_seg; |
3282 | td->first_trb = ep_ring->enqueue; |
3283 | |
3284 | return 0; |
3285 | } |
3286 | |
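/*
 * Number of TRBs needed to cover @len bytes starting at @addr, given that a
 * single TRB buffer must not cross a TRB_MAX_BUFF_SIZE (64 KB) boundary.
 * A zero-length transfer still needs one TRB. For example, 8192 bytes that
 * start 61440 bytes into a 64 KB region need two TRBs.
 */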
3287 | unsigned int count_trbs(u64 addr, u64 len) |
3288 | { |
3289 | unsigned int num_trbs; |
3290 | |
3291 | num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)), |
3292 | TRB_MAX_BUFF_SIZE); |
3293 | if (num_trbs == 0) |
3294 | num_trbs++; |
3295 | |
3296 | return num_trbs; |
3297 | } |
3298 | |
3299 | static inline unsigned int count_trbs_needed(struct urb *urb) |
3300 | { |
	return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
3302 | } |
3303 | |
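/*
 * Count the TRBs needed for a scatter-gather URB, walking the mapped sg list
 * and stopping once transfer_buffer_length bytes have been accounted for.
 */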
3304 | static unsigned int count_sg_trbs_needed(struct urb *urb) |
3305 | { |
3306 | struct scatterlist *sg; |
3307 | unsigned int i, len, full_len, num_trbs = 0; |
3308 | |
3309 | full_len = urb->transfer_buffer_length; |
3310 | |
3311 | for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { |
3312 | len = sg_dma_len(sg); |
3313 | num_trbs += count_trbs(sg_dma_address(sg), len); |
3314 | len = min_t(unsigned int, len, full_len); |
3315 | full_len -= len; |
3316 | if (full_len == 0) |
3317 | break; |
3318 | } |
3319 | |
3320 | return num_trbs; |
3321 | } |
3322 | |
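/* Count the TRBs needed for isochronous packet i of the URB. */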
3323 | static unsigned int count_isoc_trbs_needed(struct urb *urb, int i) |
3324 | { |
3325 | u64 addr, len; |
3326 | |
3327 | addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); |
3328 | len = urb->iso_frame_desc[i].length; |
3329 | |
3330 | return count_trbs(addr, len); |
3331 | } |
3332 | |
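/* Sanity check: the queued TRB lengths must add up to the URB transfer length. */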
3333 | static void check_trb_math(struct urb *urb, int running_total) |
3334 | { |
3335 | if (unlikely(running_total != urb->transfer_buffer_length)) |
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
			"queued %#x (%d), asked for %#x (%d)\n",
3338 | __func__, |
3339 | urb->ep->desc.bEndpointAddress, |
3340 | running_total, running_total, |
3341 | urb->transfer_buffer_length, |
3342 | urb->transfer_buffer_length); |
3343 | } |
3344 | |
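/*
 * Hand the TD to the hardware: write the first TRB's cycle bit last (so the
 * xHC cannot see a partially written TD) and ring the endpoint doorbell.
 */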
3345 | static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, |
3346 | unsigned int ep_index, unsigned int stream_id, int start_cycle, |
3347 | struct xhci_generic_trb *start_trb) |
3348 | { |
3349 | /* |
3350 | * Pass all the TRBs to the hardware at once and make sure this write |
3351 | * isn't reordered. |
3352 | */ |
3353 | wmb(); |
3354 | if (start_cycle) |
3355 | start_trb->field[3] |= cpu_to_le32(start_cycle); |
3356 | else |
3357 | start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); |
3358 | xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); |
3359 | } |
3360 | |
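/*
 * Compare the interval programmed into the endpoint context with the one the
 * driver requested in the URB; if they differ, note the mismatch and use the
 * xHC's value.
 */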
3361 | static void check_interval(struct xhci_hcd *xhci, struct urb *urb, |
3362 | struct xhci_ep_ctx *ep_ctx) |
3363 | { |
3364 | int xhci_interval; |
3365 | int ep_interval; |
3366 | |
3367 | xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); |
3368 | ep_interval = urb->interval; |
3369 | |
3370 | /* Convert to microframes */ |
3371 | if (urb->dev->speed == USB_SPEED_LOW || |
3372 | urb->dev->speed == USB_SPEED_FULL) |
3373 | ep_interval *= 8; |
3374 | |
3375 | /* FIXME change this to a warning and a suggestion to use the new API |
3376 | * to set the polling interval (once the API is added). |
3377 | */ |
3378 | if (xhci_interval != ep_interval) { |
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
3383 | urb->interval = xhci_interval; |
3384 | /* Convert back to frames for LS/FS devices */ |
3385 | if (urb->dev->speed == USB_SPEED_LOW || |
3386 | urb->dev->speed == USB_SPEED_FULL) |
3387 | urb->interval /= 8; |
3388 | } |
3389 | } |
3390 | |
3391 | /* |
3392 | * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt |
3393 | * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD |
3394 | * (comprised of sg list entries) can take several service intervals to |
3395 | * transmit. |
3396 | */ |
3397 | int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
3398 | struct urb *urb, int slot_id, unsigned int ep_index) |
3399 | { |
3400 | struct xhci_ep_ctx *ep_ctx; |
3401 | |
	ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3403 | check_interval(xhci, urb, ep_ctx); |
3404 | |
3405 | return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); |
3406 | } |
3407 | |
3408 | /* |
3409 | * For xHCI 1.0 host controllers, TD size is the number of max packet sized |
3410 | * packets remaining in the TD (*not* including this TRB). |
3411 | * |
3412 | * Total TD packet count = total_packet_count = |
3413 | * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize) |
3414 | * |
3415 | * Packets transferred up to and including this TRB = packets_transferred = |
3416 | * rounddown(total bytes transferred including this TRB / wMaxPacketSize) |
3417 | * |
3418 | * TD size = total_packet_count - packets_transferred |
3419 | * |
3420 | * For xHCI 0.96 and older, TD size field should be the remaining bytes |
3421 | * including this TRB, right shifted by 10 |
3422 | * |
3423 | * For all hosts it must fit in bits 21:17, so it can't be bigger than 31. |
3424 | * This is taken care of in the TRB_TD_SIZE() macro |
3425 | * |
3426 | * The last TRB in a TD must have the TD size set to zero. |
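 *
 * For example (worked numbers, xHCI 1.0+ rules): a 3072-byte TD with a
 * 512-byte wMaxPacketSize gives total_packet_count = 6; after a first TRB
 * carrying 1024 bytes, packets_transferred = 2, so that TRB's TD size field
 * is 4.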
3427 | */ |
3428 | static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, |
3429 | int trb_buff_len, unsigned int td_total_len, |
3430 | struct urb *urb, bool more_trbs_coming) |
3431 | { |
3432 | u32 maxp, total_packet_count; |
3433 | |
3434 | /* MTK xHCI 0.96 contains some features from 1.0 */ |
3435 | if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) |
3436 | return ((td_total_len - transferred) >> 10); |
3437 | |
3438 | /* One TRB with a zero-length data packet. */ |
3439 | if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) || |
3440 | trb_buff_len == td_total_len) |
3441 | return 0; |
3442 | |
	/* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
3444 | if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) |
3445 | trb_buff_len = 0; |
3446 | |
	maxp = usb_endpoint_maxp(&urb->ep->desc);
3448 | total_packet_count = DIV_ROUND_UP(td_total_len, maxp); |
3449 | |
3450 | /* Queueing functions don't count the current TRB into transferred */ |
3451 | return (total_packet_count - ((transferred + trb_buff_len) / maxp)); |
3452 | } |
3453 | |
3454 | |
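/*
 * TD fragment alignment: when a TD must continue past a link TRB, the amount
 * queued on the current segment should be a multiple of wMaxPacketSize. Try
 * to split the last normal TRB on the segment accordingly; if that is not
 * possible, copy the remainder into a per-segment bounce buffer instead.
 *
 * Returns 1 if the bounce buffer is used, 0 otherwise.
 */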
3455 | static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, |
3456 | u32 *trb_buff_len, struct xhci_segment *seg) |
3457 | { |
3458 | struct device *dev = xhci_to_hcd(xhci)->self.sysdev; |
3459 | unsigned int unalign; |
3460 | unsigned int max_pkt; |
3461 | u32 new_buff_len; |
3462 | size_t len; |
3463 | |
	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
3465 | unalign = (enqd_len + *trb_buff_len) % max_pkt; |
3466 | |
3467 | /* we got lucky, last normal TRB data on segment is packet aligned */ |
3468 | if (unalign == 0) |
3469 | return 0; |
3470 | |
	xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3472 | unalign, *trb_buff_len); |
3473 | |
	/* is the last normal TRB alignable by splitting it? */
3475 | if (*trb_buff_len > unalign) { |
3476 | *trb_buff_len -= unalign; |
		xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
3478 | return 0; |
3479 | } |
3480 | |
3481 | /* |
	 * We want enqd_len + trb_buff_len to sum up to a number that is
	 * divisible by the endpoint's wMaxPacketSize. IOW:
	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
3485 | */ |
3486 | new_buff_len = max_pkt - (enqd_len % max_pkt); |
3487 | |
3488 | if (new_buff_len > (urb->transfer_buffer_length - enqd_len)) |
3489 | new_buff_len = (urb->transfer_buffer_length - enqd_len); |
3490 | |
	/* create a bounce buffer of at most max_pkt bytes, pointed to by the last TRB */
3492 | if (usb_urb_dir_out(urb)) { |
3493 | if (urb->num_sgs) { |
			len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
						 seg->bounce_buf, new_buff_len, enqd_len);
			if (len != new_buff_len)
				xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
					  len, new_buff_len);
3499 | } else { |
3500 | memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); |
3501 | } |
3502 | |
3503 | seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, |
3504 | max_pkt, DMA_TO_DEVICE); |
3505 | } else { |
3506 | seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, |
3507 | max_pkt, DMA_FROM_DEVICE); |
3508 | } |
3509 | |
	if (dma_mapping_error(dev, seg->bounce_dma)) {
3511 | /* try without aligning. Some host controllers survive */ |
		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3513 | return 0; |
3514 | } |
3515 | *trb_buff_len = new_buff_len; |
3516 | seg->bounce_len = new_buff_len; |
3517 | seg->bounce_offs = enqd_len; |
3518 | |
	xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3520 | |
3521 | return 1; |
3522 | } |
3523 | |
3524 | /* This is very similar to what ehci-q.c qtd_fill() does */ |
3525 | int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
3526 | struct urb *urb, int slot_id, unsigned int ep_index) |
3527 | { |
3528 | struct xhci_ring *ring; |
3529 | struct urb_priv *urb_priv; |
3530 | struct xhci_td *td; |
3531 | struct xhci_generic_trb *start_trb; |
3532 | struct scatterlist *sg = NULL; |
3533 | bool more_trbs_coming = true; |
3534 | bool need_zero_pkt = false; |
3535 | bool first_trb = true; |
3536 | unsigned int num_trbs; |
3537 | unsigned int start_cycle, num_sgs = 0; |
3538 | unsigned int enqd_len, block_len, trb_buff_len, full_len; |
3539 | int sent_len, ret; |
3540 | u32 field, length_field, remainder; |
3541 | u64 addr, send_addr; |
3542 | |
3543 | ring = xhci_urb_to_transfer_ring(xhci, urb); |
3544 | if (!ring) |
3545 | return -EINVAL; |
3546 | |
3547 | full_len = urb->transfer_buffer_length; |
3548 | /* If we have scatter/gather list, we use it. */ |
3549 | if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) { |
3550 | num_sgs = urb->num_mapped_sgs; |
3551 | sg = urb->sg; |
3552 | addr = (u64) sg_dma_address(sg); |
3553 | block_len = sg_dma_len(sg); |
3554 | num_trbs = count_sg_trbs_needed(urb); |
3555 | } else { |
3556 | num_trbs = count_trbs_needed(urb); |
3557 | addr = (u64) urb->transfer_dma; |
3558 | block_len = full_len; |
3559 | } |
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			       ep_index, urb->stream_id,
			       num_trbs, urb, 0, mem_flags);
3563 | if (unlikely(ret < 0)) |
3564 | return ret; |
3565 | |
3566 | urb_priv = urb->hcpriv; |
3567 | |
3568 | /* Deal with URB_ZERO_PACKET - need one more td/trb */ |
3569 | if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1) |
3570 | need_zero_pkt = true; |
3571 | |
3572 | td = &urb_priv->td[0]; |
3573 | |
3574 | /* |
3575 | * Don't give the first TRB to the hardware (by toggling the cycle bit) |
3576 | * until we've finished creating all the other TRBs. The ring's cycle |
3577 | * state may change as we enqueue the other TRBs, so save it too. |
3578 | */ |
3579 | start_trb = &ring->enqueue->generic; |
3580 | start_cycle = ring->cycle_state; |
3581 | send_addr = addr; |
3582 | |
3583 | /* Queue the TRBs, even if they are zero-length */ |
3584 | for (enqd_len = 0; first_trb || enqd_len < full_len; |
3585 | enqd_len += trb_buff_len) { |
3586 | field = TRB_TYPE(TRB_NORMAL); |
3587 | |
3588 | /* TRB buffer should not cross 64KB boundaries */ |
3589 | trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); |
3590 | trb_buff_len = min_t(unsigned int, trb_buff_len, block_len); |
3591 | |
3592 | if (enqd_len + trb_buff_len > full_len) |
3593 | trb_buff_len = full_len - enqd_len; |
3594 | |
3595 | /* Don't change the cycle bit of the first TRB until later */ |
3596 | if (first_trb) { |
3597 | first_trb = false; |
3598 | if (start_cycle == 0) |
3599 | field |= TRB_CYCLE; |
3600 | } else |
3601 | field |= ring->cycle_state; |
3602 | |
3603 | /* Chain all the TRBs together; clear the chain bit in the last |
3604 | * TRB to indicate it's the last TRB in the chain. |
3605 | */ |
3606 | if (enqd_len + trb_buff_len < full_len) { |
3607 | field |= TRB_CHAIN; |
			if (trb_is_link(ring->enqueue + 1)) {
				if (xhci_align_td(xhci, urb, enqd_len,
						  &trb_buff_len,
						  ring->enq_seg)) {
3612 | send_addr = ring->enq_seg->bounce_dma; |
3613 | /* assuming TD won't span 2 segs */ |
3614 | td->bounce_seg = ring->enq_seg; |
3615 | } |
3616 | } |
3617 | } |
3618 | if (enqd_len + trb_buff_len >= full_len) { |
3619 | field &= ~TRB_CHAIN; |
3620 | field |= TRB_IOC; |
3621 | more_trbs_coming = false; |
3622 | td->last_trb = ring->enqueue; |
3623 | td->last_trb_seg = ring->enq_seg; |
3624 | if (xhci_urb_suitable_for_idt(urb)) { |
3625 | memcpy(&send_addr, urb->transfer_buffer, |
3626 | trb_buff_len); |
3627 | le64_to_cpus(&send_addr); |
3628 | field |= TRB_IDT; |
3629 | } |
3630 | } |
3631 | |
3632 | /* Only set interrupt on short packet for IN endpoints */ |
3633 | if (usb_urb_dir_in(urb)) |
3634 | field |= TRB_ISP; |
3635 | |
3636 | /* Set the TRB length, TD size, and interrupter fields. */ |
		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
					      full_len, urb, more_trbs_coming);
3639 | |
3640 | length_field = TRB_LEN(trb_buff_len) | |
3641 | TRB_TD_SIZE(remainder) | |
3642 | TRB_INTR_TARGET(0); |
3643 | |
		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
			  lower_32_bits(send_addr),
			  upper_32_bits(send_addr),
			  length_field,
			  field);
3649 | td->num_trbs++; |
3650 | addr += trb_buff_len; |
3651 | sent_len = trb_buff_len; |
3652 | |
3653 | while (sg && sent_len >= block_len) { |
3654 | /* New sg entry */ |
3655 | --num_sgs; |
3656 | sent_len -= block_len; |
3657 | sg = sg_next(sg); |
3658 | if (num_sgs != 0 && sg) { |
3659 | block_len = sg_dma_len(sg); |
3660 | addr = (u64) sg_dma_address(sg); |
3661 | addr += sent_len; |
3662 | } |
3663 | } |
3664 | block_len -= sent_len; |
3665 | send_addr = addr; |
3666 | } |
3667 | |
3668 | if (need_zero_pkt) { |
		ret = prepare_transfer(xhci, xhci->devs[slot_id],
				       ep_index, urb->stream_id,
				       1, urb, 1, mem_flags);
3672 | urb_priv->td[1].last_trb = ring->enqueue; |
3673 | urb_priv->td[1].last_trb_seg = ring->enq_seg; |
3674 | field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC; |
		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3676 | urb_priv->td[1].num_trbs++; |
3677 | } |
3678 | |
	check_trb_math(urb, enqd_len);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3681 | start_cycle, start_trb); |
3682 | return 0; |
3683 | } |
3684 | |
3685 | /* Caller must have locked xhci->lock */ |
3686 | int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
3687 | struct urb *urb, int slot_id, unsigned int ep_index) |
3688 | { |
3689 | struct xhci_ring *ep_ring; |
3690 | int num_trbs; |
3691 | int ret; |
3692 | struct usb_ctrlrequest *setup; |
3693 | struct xhci_generic_trb *start_trb; |
3694 | int start_cycle; |
3695 | u32 field; |
3696 | struct urb_priv *urb_priv; |
3697 | struct xhci_td *td; |
3698 | |
3699 | ep_ring = xhci_urb_to_transfer_ring(xhci, urb); |
3700 | if (!ep_ring) |
3701 | return -EINVAL; |
3702 | |
3703 | /* |
3704 | * Need to copy setup packet into setup TRB, so we can't use the setup |
3705 | * DMA address. |
3706 | */ |
3707 | if (!urb->setup_packet) |
3708 | return -EINVAL; |
3709 | |
3710 | /* 1 TRB for setup, 1 for status */ |
3711 | num_trbs = 2; |
3712 | /* |
3713 | * Don't need to check if we need additional event data and normal TRBs, |
3714 | * since data in control transfers will never get bigger than 16MB |
3715 | * XXX: can we get a buffer that crosses 64KB boundaries? |
3716 | */ |
3717 | if (urb->transfer_buffer_length > 0) |
3718 | num_trbs++; |
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			       ep_index, urb->stream_id,
			       num_trbs, urb, 0, mem_flags);
3722 | if (ret < 0) |
3723 | return ret; |
3724 | |
3725 | urb_priv = urb->hcpriv; |
3726 | td = &urb_priv->td[0]; |
3727 | td->num_trbs = num_trbs; |
3728 | |
3729 | /* |
3730 | * Don't give the first TRB to the hardware (by toggling the cycle bit) |
3731 | * until we've finished creating all the other TRBs. The ring's cycle |
3732 | * state may change as we enqueue the other TRBs, so save it too. |
3733 | */ |
3734 | start_trb = &ep_ring->enqueue->generic; |
3735 | start_cycle = ep_ring->cycle_state; |
3736 | |
3737 | /* Queue setup TRB - see section 6.4.1.2.1 */ |
3738 | /* FIXME better way to translate setup_packet into two u32 fields? */ |
3739 | setup = (struct usb_ctrlrequest *) urb->setup_packet; |
3740 | field = 0; |
3741 | field |= TRB_IDT | TRB_TYPE(TRB_SETUP); |
3742 | if (start_cycle == 0) |
3743 | field |= 0x1; |
3744 | |
3745 | /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */ |
3746 | if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) { |
3747 | if (urb->transfer_buffer_length > 0) { |
3748 | if (setup->bRequestType & USB_DIR_IN) |
3749 | field |= TRB_TX_TYPE(TRB_DATA_IN); |
3750 | else |
3751 | field |= TRB_TX_TYPE(TRB_DATA_OUT); |
3752 | } |
3753 | } |
3754 | |
	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);
3761 | |
3762 | /* If there's data, queue data TRBs */ |
3763 | /* Only set interrupt on short packet for IN endpoints */ |
3764 | if (usb_urb_dir_in(urb)) |
3765 | field = TRB_ISP | TRB_TYPE(TRB_DATA); |
3766 | else |
3767 | field = TRB_TYPE(TRB_DATA); |
3768 | |
3769 | if (urb->transfer_buffer_length > 0) { |
3770 | u32 length_field, remainder; |
3771 | u64 addr; |
3772 | |
3773 | if (xhci_urb_suitable_for_idt(urb)) { |
3774 | memcpy(&addr, urb->transfer_buffer, |
3775 | urb->transfer_buffer_length); |
3776 | le64_to_cpus(&addr); |
3777 | field |= TRB_IDT; |
3778 | } else { |
3779 | addr = (u64) urb->transfer_dma; |
3780 | } |
3781 | |
		remainder = xhci_td_remainder(xhci, 0,
					      urb->transfer_buffer_length,
					      urb->transfer_buffer_length,
					      urb, 1);
3786 | length_field = TRB_LEN(urb->transfer_buffer_length) | |
3787 | TRB_TD_SIZE(remainder) | |
3788 | TRB_INTR_TARGET(0); |
3789 | if (setup->bRequestType & USB_DIR_IN) |
3790 | field |= TRB_DIR_IN; |
		queue_trb(xhci, ep_ring, true,
			  lower_32_bits(addr),
			  upper_32_bits(addr),
			  length_field,
			  field | ep_ring->cycle_state);
3796 | } |
3797 | |
3798 | /* Save the DMA address of the last TRB in the TD */ |
3799 | td->last_trb = ep_ring->enqueue; |
3800 | td->last_trb_seg = ep_ring->enq_seg; |
3801 | |
3802 | /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ |
3803 | /* If the device sent data, the status stage is an OUT transfer */ |
3804 | if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) |
3805 | field = 0; |
3806 | else |
3807 | field = TRB_DIR_IN; |
	queue_trb(xhci, ep_ring, false,
		  0,
		  0,
		  TRB_INTR_TARGET(0),
		  /* Event on completion */
		  field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3814 | |
	giveback_first_trb(xhci, slot_id, ep_index, 0,
3816 | start_cycle, start_trb); |
3817 | return 0; |
3818 | } |
3819 | |
3820 | /* |
3821 | * The transfer burst count field of the isochronous TRB defines the number of |
3822 | * bursts that are required to move all packets in this TD. Only SuperSpeed |
3823 | * devices can burst up to bMaxBurst number of packets per service interval. |
3824 | * This field is zero based, meaning a value of zero in the field means one |
3825 | * burst. Basically, for everything but SuperSpeed devices, this field will be |
 * zero. Only xHCI 1.0 and later host controllers support this field.
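 *
 * For example (illustrative numbers): with bMaxBurst = 3 (four packets per
 * burst) and a 10-packet TD, DIV_ROUND_UP(10, 4) - 1 = 2, i.e. three bursts
 * are needed and the zero-based field is 2.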
3827 | */ |
3828 | static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, |
3829 | struct urb *urb, unsigned int total_packet_count) |
3830 | { |
3831 | unsigned int max_burst; |
3832 | |
3833 | if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER) |
3834 | return 0; |
3835 | |
3836 | max_burst = urb->ep->ss_ep_comp.bMaxBurst; |
3837 | return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; |
3838 | } |
3839 | |
3840 | /* |
3841 | * Returns the number of packets in the last "burst" of packets. This field is |
3842 | * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so |
3843 | * the last burst packet count is equal to the total number of packets in the |
3844 | * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst |
3845 | * must contain (bMaxBurst + 1) number of packets, but the last burst can |
3846 | * contain 1 to (bMaxBurst + 1) packets. |
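 *
 * For example (illustrative numbers): with bMaxBurst = 3 (four packets per
 * burst) and a 10-packet TD, 10 % 4 = 2, so the last burst holds two packets
 * and this function returns 1 (the field is zero-based).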
3847 | */ |
3848 | static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, |
3849 | struct urb *urb, unsigned int total_packet_count) |
3850 | { |
3851 | unsigned int max_burst; |
3852 | unsigned int residue; |
3853 | |
3854 | if (xhci->hci_version < 0x100) |
3855 | return 0; |
3856 | |
3857 | if (urb->dev->speed >= USB_SPEED_SUPER) { |
3858 | /* bMaxBurst is zero based: 0 means 1 packet per burst */ |
3859 | max_burst = urb->ep->ss_ep_comp.bMaxBurst; |
3860 | residue = total_packet_count % (max_burst + 1); |
3861 | /* If residue is zero, the last burst contains (max_burst + 1) |
3862 | * number of packets, but the TLBPC field is zero-based. |
3863 | */ |
3864 | if (residue == 0) |
3865 | return max_burst; |
3866 | return residue - 1; |
3867 | } |
3868 | if (total_packet_count == 0) |
3869 | return 0; |
3870 | return total_packet_count - 1; |
3871 | } |
3872 | |
3873 | /* |
 * Calculate the Frame ID field of the isochronous TRB, which identifies the
 * target frame that the Interval associated with this Isochronous Transfer
 * Descriptor will start on. Refer to section 4.11.2.5 of the xHCI 1.1 spec.
3877 | * |
3878 | * Returns actual frame id on success, negative value on error. |
3879 | */ |
3880 | static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci, |
3881 | struct urb *urb, int index) |
3882 | { |
3883 | int start_frame, ist, ret = 0; |
3884 | int start_frame_id, end_frame_id, current_frame_id; |
3885 | |
3886 | if (urb->dev->speed == USB_SPEED_LOW || |
3887 | urb->dev->speed == USB_SPEED_FULL) |
3888 | start_frame = urb->start_frame + index * urb->interval; |
3889 | else |
3890 | start_frame = (urb->start_frame + index * urb->interval) >> 3; |
3891 | |
3892 | /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2): |
3893 | * |
3894 | * If bit [3] of IST is cleared to '0', software can add a TRB no |
3895 | * later than IST[2:0] Microframes before that TRB is scheduled to |
3896 | * be executed. |
3897 | * If bit [3] of IST is set to '1', software can add a TRB no later |
3898 | * than IST[2:0] Frames before that TRB is scheduled to be executed. |
3899 | */ |
3900 | ist = HCS_IST(xhci->hcs_params2) & 0x7; |
3901 | if (HCS_IST(xhci->hcs_params2) & (1 << 3)) |
3902 | ist <<= 3; |
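	/*
	 * For example (illustrative value): if HCSPARAMS2 reports IST = 0b1010,
	 * then IST[2:0] = 2 and bit [3] is set, so ist becomes 2 << 3 = 16,
	 * i.e. 2 Frames expressed in microframes.
	 */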
3903 | |
3904 | /* Software shall not schedule an Isoch TD with a Frame ID value that |
3905 | * is less than the Start Frame ID or greater than the End Frame ID, |
3906 | * where: |
3907 | * |
3908 | * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048 |
3909 | * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048 |
3910 | * |
3911 | * Both the End Frame ID and Start Frame ID values are calculated |
3912 | * in microframes. When software determines the valid Frame ID value; |
3913 | * The End Frame ID value should be rounded down to the nearest Frame |
3914 | * boundary, and the Start Frame ID value should be rounded up to the |
3915 | * nearest Frame boundary. |
3916 | */ |
	current_frame_id = readl(&xhci->run_regs->microframe_index);
3918 | start_frame_id = roundup(current_frame_id + ist + 1, 8); |
3919 | end_frame_id = rounddown(current_frame_id + 895 * 8, 8); |
3920 | |
3921 | start_frame &= 0x7ff; |
3922 | start_frame_id = (start_frame_id >> 3) & 0x7ff; |
3923 | end_frame_id = (end_frame_id >> 3) & 0x7ff; |
3924 | |
	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
3926 | __func__, index, readl(&xhci->run_regs->microframe_index), |
3927 | start_frame_id, end_frame_id, start_frame); |
3928 | |
3929 | if (start_frame_id < end_frame_id) { |
3930 | if (start_frame > end_frame_id || |
3931 | start_frame < start_frame_id) |
3932 | ret = -EINVAL; |
3933 | } else if (start_frame_id > end_frame_id) { |
3934 | if ((start_frame > end_frame_id && |
3935 | start_frame < start_frame_id)) |
3936 | ret = -EINVAL; |
3937 | } else { |
3938 | ret = -EINVAL; |
3939 | } |
3940 | |
3941 | if (index == 0) { |
3942 | if (ret == -EINVAL || start_frame == start_frame_id) { |
3943 | start_frame = start_frame_id + 1; |
3944 | if (urb->dev->speed == USB_SPEED_LOW || |
3945 | urb->dev->speed == USB_SPEED_FULL) |
3946 | urb->start_frame = start_frame; |
3947 | else |
3948 | urb->start_frame = start_frame << 3; |
3949 | ret = 0; |
3950 | } |
3951 | } |
3952 | |
3953 | if (ret) { |
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
3955 | start_frame, current_frame_id, index, |
3956 | start_frame_id, end_frame_id); |
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
3958 | return ret; |
3959 | } |
3960 | |
3961 | return start_frame; |
3962 | } |
3963 | |
3964 | /* Check if we should generate event interrupt for a TD in an isoc URB */ |
3965 | static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i) |
3966 | { |
3967 | if (xhci->hci_version < 0x100) |
3968 | return false; |
3969 | /* always generate an event interrupt for the last TD */ |
3970 | if (i == num_tds - 1) |
3971 | return false; |
3972 | /* |
3973 | * If AVOID_BEI is set the host handles full event rings poorly, |
3974 | * generate an event at least every 8th TD to clear the event ring |
3975 | */ |
3976 | if (i && xhci->quirks & XHCI_AVOID_BEI) |
3977 | return !!(i % xhci->isoc_bei_interval); |
3978 | |
3979 | return true; |
3980 | } |
3981 | |
/* Queue TRBs for an isochronous transfer, one TD per iso_frame_desc packet */
3983 | static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
3984 | struct urb *urb, int slot_id, unsigned int ep_index) |
3985 | { |
3986 | struct xhci_ring *ep_ring; |
3987 | struct urb_priv *urb_priv; |
3988 | struct xhci_td *td; |
3989 | int num_tds, trbs_per_td; |
3990 | struct xhci_generic_trb *start_trb; |
3991 | bool first_trb; |
3992 | int start_cycle; |
3993 | u32 field, length_field; |
3994 | int running_total, trb_buff_len, td_len, td_remain_len, ret; |
3995 | u64 start_addr, addr; |
3996 | int i, j; |
3997 | bool more_trbs_coming; |
3998 | struct xhci_virt_ep *xep; |
3999 | int frame_id; |
4000 | |
4001 | xep = &xhci->devs[slot_id]->eps[ep_index]; |
4002 | ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; |
4003 | |
4004 | num_tds = urb->number_of_packets; |
4005 | if (num_tds < 1) { |
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
4007 | return -EINVAL; |
4008 | } |
4009 | start_addr = (u64) urb->transfer_dma; |
4010 | start_trb = &ep_ring->enqueue->generic; |
4011 | start_cycle = ep_ring->cycle_state; |
4012 | |
4013 | urb_priv = urb->hcpriv; |
4014 | /* Queue the TRBs for each TD, even if they are zero-length */ |
4015 | for (i = 0; i < num_tds; i++) { |
4016 | unsigned int total_pkt_count, max_pkt; |
4017 | unsigned int burst_count, last_burst_pkt_count; |
4018 | u32 sia_frame_id; |
4019 | |
4020 | first_trb = true; |
4021 | running_total = 0; |
4022 | addr = start_addr + urb->iso_frame_desc[i].offset; |
4023 | td_len = urb->iso_frame_desc[i].length; |
4024 | td_remain_len = td_len; |
		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
4026 | total_pkt_count = DIV_ROUND_UP(td_len, max_pkt); |
4027 | |
4028 | /* A zero-length transfer still involves at least one packet. */ |
4029 | if (total_pkt_count == 0) |
4030 | total_pkt_count++; |
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
				urb, total_pkt_count);
4034 | |
4035 | trbs_per_td = count_isoc_trbs_needed(urb, i); |
4036 | |
		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
4039 | if (ret < 0) { |
4040 | if (i == 0) |
4041 | return ret; |
4042 | goto cleanup; |
4043 | } |
4044 | td = &urb_priv->td[i]; |
4045 | td->num_trbs = trbs_per_td; |
		/* use SIA by default; if a frame ID is used, overwrite it */
4047 | sia_frame_id = TRB_SIA; |
4048 | if (!(urb->transfer_flags & URB_ISO_ASAP) && |
4049 | HCC_CFC(xhci->hcc_params)) { |
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
4051 | if (frame_id >= 0) |
4052 | sia_frame_id = TRB_FRAME_ID(frame_id); |
4053 | } |
4054 | /* |
4055 | * Set isoc specific data for the first TRB in a TD. |
4056 | * Prevent HW from getting the TRBs by keeping the cycle state |
		 * inverted in the first TD's isoc TRB.
4058 | */ |
4059 | field = TRB_TYPE(TRB_ISOC) | |
4060 | TRB_TLBPC(last_burst_pkt_count) | |
4061 | sia_frame_id | |
4062 | (i ? ep_ring->cycle_state : !start_cycle); |
4063 | |
4064 | /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */ |
4065 | if (!xep->use_extended_tbc) |
4066 | field |= TRB_TBC(burst_count); |
4067 | |
4068 | /* fill the rest of the TRB fields, and remaining normal TRBs */ |
4069 | for (j = 0; j < trbs_per_td; j++) { |
4070 | u32 remainder = 0; |
4071 | |
4072 | /* only first TRB is isoc, overwrite otherwise */ |
4073 | if (!first_trb) |
4074 | field = TRB_TYPE(TRB_NORMAL) | |
4075 | ep_ring->cycle_state; |
4076 | |
4077 | /* Only set interrupt on short packet for IN EPs */ |
4078 | if (usb_urb_dir_in(urb)) |
4079 | field |= TRB_ISP; |
4080 | |
4081 | /* Set the chain bit for all except the last TRB */ |
4082 | if (j < trbs_per_td - 1) { |
4083 | more_trbs_coming = true; |
4084 | field |= TRB_CHAIN; |
4085 | } else { |
4086 | more_trbs_coming = false; |
4087 | td->last_trb = ep_ring->enqueue; |
4088 | td->last_trb_seg = ep_ring->enq_seg; |
4089 | field |= TRB_IOC; |
4090 | if (trb_block_event_intr(xhci, num_tds, i)) |
4091 | field |= TRB_BEI; |
4092 | } |
4093 | /* Calculate TRB length */ |
4094 | trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); |
4095 | if (trb_buff_len > td_remain_len) |
4096 | trb_buff_len = td_remain_len; |
4097 | |
4098 | /* Set the TRB length, TD size, & interrupter fields. */ |
			remainder = xhci_td_remainder(xhci, running_total,
						      trb_buff_len, td_len,
						      urb, more_trbs_coming);
4102 | |
4103 | length_field = TRB_LEN(trb_buff_len) | |
4104 | TRB_INTR_TARGET(0); |
4105 | |
4106 | /* xhci 1.1 with ETE uses TD Size field for TBC */ |
4107 | if (first_trb && xep->use_extended_tbc) |
4108 | length_field |= TRB_TD_SIZE_TBC(burst_count); |
4109 | else |
4110 | length_field |= TRB_TD_SIZE(remainder); |
4111 | first_trb = false; |
4112 | |
			queue_trb(xhci, ep_ring, more_trbs_coming,
				  lower_32_bits(addr),
				  upper_32_bits(addr),
				  length_field,
				  field);
4118 | running_total += trb_buff_len; |
4119 | |
4120 | addr += trb_buff_len; |
4121 | td_remain_len -= trb_buff_len; |
4122 | } |
4123 | |
4124 | /* Check TD length */ |
4125 | if (running_total != td_len) { |
			xhci_err(xhci, "ISOC TD length mismatch\n");
4127 | ret = -EINVAL; |
4128 | goto cleanup; |
4129 | } |
4130 | } |
4131 | |
4132 | /* store the next frame id */ |
4133 | if (HCC_CFC(xhci->hcc_params)) |
4134 | xep->next_frame_id = urb->start_frame + num_tds * urb->interval; |
4135 | |
4136 | if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { |
4137 | if (xhci->quirks & XHCI_AMD_PLL_FIX) |
4138 | usb_amd_quirk_pll_disable(); |
4139 | } |
4140 | xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; |
4141 | |
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
4143 | start_cycle, start_trb); |
4144 | return 0; |
4145 | cleanup: |
4146 | /* Clean up a partially enqueued isoc transfer. */ |
4147 | |
4148 | for (i--; i >= 0; i--) |
		list_del_init(&urb_priv->td[i].td_list);
4150 | |
4151 | /* Use the first TD as a temporary variable to turn the TDs we've queued |
4152 | * into No-ops with a software-owned cycle bit. That way the hardware |
4153 | * won't accidentally start executing bogus TDs when we partially |
4154 | * overwrite them. td->first_trb and td->start_seg are already set. |
4155 | */ |
4156 | urb_priv->td[0].last_trb = ep_ring->enqueue; |
4157 | /* Every TRB except the first & last will have its cycle bit flipped. */ |
	td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
4159 | |
4160 | /* Reset the ring enqueue back to the first TRB and its cycle bit. */ |
4161 | ep_ring->enqueue = urb_priv->td[0].first_trb; |
4162 | ep_ring->enq_seg = urb_priv->td[0].start_seg; |
4163 | ep_ring->cycle_state = start_cycle; |
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
4165 | return ret; |
4166 | } |
4167 | |
4168 | /* |
4169 | * Check transfer ring to guarantee there is enough room for the urb. |
4170 | * Update ISO URB start_frame and interval. |
4171 | * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to |
4172 | * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or |
4173 | * Contiguous Frame ID is not supported by HC. |
4174 | */ |
4175 | int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, |
4176 | struct urb *urb, int slot_id, unsigned int ep_index) |
4177 | { |
4178 | struct xhci_virt_device *xdev; |
4179 | struct xhci_ring *ep_ring; |
4180 | struct xhci_ep_ctx *ep_ctx; |
4181 | int start_frame; |
4182 | int num_tds, num_trbs, i; |
4183 | int ret; |
4184 | struct xhci_virt_ep *xep; |
4185 | int ist; |
4186 | |
4187 | xdev = xhci->devs[slot_id]; |
4188 | xep = &xhci->devs[slot_id]->eps[ep_index]; |
4189 | ep_ring = xdev->eps[ep_index].ring; |
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
4191 | |
4192 | num_trbs = 0; |
4193 | num_tds = urb->number_of_packets; |
4194 | for (i = 0; i < num_tds; i++) |
4195 | num_trbs += count_isoc_trbs_needed(urb, i); |
4196 | |
4197 | /* Check the ring to guarantee there is enough room for the whole urb. |
4198 | * Do not insert any td of the urb to the ring if the check failed. |
4199 | */ |
4200 | ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), |
4201 | num_trbs, mem_flags); |
4202 | if (ret) |
4203 | return ret; |
4204 | |
4205 | /* |
4206 | * Check interval value. This should be done before we start to |
4207 | * calculate the start frame value. |
4208 | */ |
4209 | check_interval(xhci, urb, ep_ctx); |
4210 | |
4211 | /* Calculate the start frame and put it in urb->start_frame. */ |
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
4213 | if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) { |
4214 | urb->start_frame = xep->next_frame_id; |
4215 | goto skip_start_over; |
4216 | } |
4217 | } |
4218 | |
	start_frame = readl(&xhci->run_regs->microframe_index);
4220 | start_frame &= 0x3fff; |
4221 | /* |
	 * Round up to the next frame and account for the time before the TRB
	 * really gets scheduled by the hardware.
4224 | */ |
4225 | ist = HCS_IST(xhci->hcs_params2) & 0x7; |
4226 | if (HCS_IST(xhci->hcs_params2) & (1 << 3)) |
4227 | ist <<= 3; |
4228 | start_frame += ist + XHCI_CFC_DELAY; |
4229 | start_frame = roundup(start_frame, 8); |
4230 | |
4231 | /* |
	 * Round up to the next ESIT (Endpoint Service Interval Time) if the
	 * ESIT is greater than 8 microframes.
4234 | */ |
4235 | if (urb->dev->speed == USB_SPEED_LOW || |
4236 | urb->dev->speed == USB_SPEED_FULL) { |
4237 | start_frame = roundup(start_frame, urb->interval << 3); |
4238 | urb->start_frame = start_frame >> 3; |
4239 | } else { |
4240 | start_frame = roundup(start_frame, urb->interval); |
4241 | urb->start_frame = start_frame; |
4242 | } |
4243 | |
4244 | skip_start_over: |
4245 | |
4246 | return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index); |
4247 | } |
4248 | |
4249 | /**** Command Ring Operations ****/ |
4250 | |
4251 | /* Generic function for queueing a command TRB on the command ring. |
4252 | * Check to make sure there's room on the command ring for one command TRB. |
4253 | * Also check that there's room reserved for commands that must not fail. |
4254 | * If this is a command that must not fail, meaning command_must_succeed = TRUE, |
4255 | * then only check for the number of reserved spots. |
4256 | * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB |
4257 | * because the command event handler may want to resubmit a failed command. |
4258 | */ |
4259 | static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4260 | u32 field1, u32 field2, |
4261 | u32 field3, u32 field4, bool command_must_succeed) |
4262 | { |
4263 | int reserved_trbs = xhci->cmd_ring_reserved_trbs; |
4264 | int ret; |
4265 | |
4266 | if ((xhci->xhc_state & XHCI_STATE_DYING) || |
4267 | (xhci->xhc_state & XHCI_STATE_HALTED)) { |
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4269 | return -ESHUTDOWN; |
4270 | } |
4271 | |
4272 | if (!command_must_succeed) |
4273 | reserved_trbs++; |
4274 | |
	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			   reserved_trbs, GFP_ATOMIC);
4277 | if (ret < 0) { |
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for unfailable commands failed.\n");
4282 | return ret; |
4283 | } |
4284 | |
4285 | cmd->command_trb = xhci->cmd_ring->enqueue; |
4286 | |
4287 | /* if there are no other commands queued we start the timeout timer */ |
	if (list_empty(&xhci->cmd_list)) {
4289 | xhci->current_cmd = cmd; |
4290 | xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); |
4291 | } |
4292 | |
	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
4294 | |
	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
		  field4 | xhci->cmd_ring->cycle_state);
4297 | return 0; |
4298 | } |
4299 | |
4300 | /* Queue a slot enable or disable request on the command ring */ |
4301 | int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4302 | u32 trb_type, u32 slot_id) |
4303 | { |
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4306 | } |
4307 | |
4308 | /* Queue an address device command TRB */ |
4309 | int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4310 | dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup) |
4311 | { |
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
4316 | } |
4317 | |
4318 | int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4319 | u32 field1, u32 field2, u32 field3, u32 field4) |
4320 | { |
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
4322 | } |
4323 | |
4324 | /* Queue a reset device command TRB */ |
4325 | int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4326 | u32 slot_id) |
4327 | { |
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
4331 | } |
4332 | |
4333 | /* Queue a configure endpoint command TRB */ |
4334 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, |
4335 | struct xhci_command *cmd, dma_addr_t in_ctx_ptr, |
4336 | u32 slot_id, bool command_must_succeed) |
4337 | { |
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
4340 | TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id), |
4341 | command_must_succeed); |
4342 | } |
4343 | |
4344 | /* Queue an evaluate context command TRB */ |
4345 | int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4346 | dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) |
4347 | { |
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
4350 | TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id), |
4351 | command_must_succeed); |
4352 | } |
4353 | |
4354 | /* |
4355 | * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop |
4356 | * activity on an endpoint that is about to be suspended. |
4357 | */ |
4358 | int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4359 | int slot_id, unsigned int ep_index, int suspend) |
4360 | { |
4361 | u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); |
4362 | u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); |
4363 | u32 type = TRB_TYPE(TRB_STOP_RING); |
4364 | u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend); |
4365 | |
	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
4368 | } |
4369 | |
4370 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, |
4371 | int slot_id, unsigned int ep_index, |
4372 | enum xhci_ep_reset_type reset_type) |
4373 | { |
4374 | u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); |
4375 | u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); |
4376 | u32 type = TRB_TYPE(TRB_RESET_EP); |
4377 | |
4378 | if (reset_type == EP_SOFT_RESET) |
4379 | type |= TRB_TSP; |
4380 | |
	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
4383 | } |
4384 | |