// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
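/*
 * Usage sketch, with a hypothetical value: booting with "xhci_hcd.quirks=0x40"
 * on the kernel command line ORs that bit into each controller's quirks.
 */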
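/*
 * Check whether the TD's start segment belongs to this ring by walking the
 * ring's circular list of segments.
 */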
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	if (!td || !td->start_seg)
		return false;
	do {
		if (seg == td->start_seg)
			return true;
		seg = seg->next;
	} while (seg && seg != ring->first_seg);

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "timeout_us" has elapsed
 * (major hardware flakeout), or the register reads as all-ones (hardware
 * removed).
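 *
 * Example: wait up to XHCI_MAX_HALT_USEC for the controller to report halted:
 *
 *   xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);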
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, timeout_us);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}

/*
 * xhci_handshake_check_state - same as xhci_handshake but takes an additional
 * exit_state parameter, and bails out with an error immediately when xhc_state
 * has exit_state flag set.
 */
int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
			       u32 mask, u32 done, int usec, unsigned int exit_state)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX ||
					xhci->xhc_state & exit_state,
					1, usec);

	if (result == U32_MAX || xhci->xhc_state & exit_state)
		return -ENODEV;

	return ret;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}
125 | |
126 | /* |
127 | * Force HC into halt state. |
128 | * |
129 | * Disable any IRQs and clear the run/stop bit. |
130 | * HC will complete any current and actively pipelined transactions, and |
131 | * should halt within 16 ms of the run/stop bit being cleared. |
132 | * Read HC Halted bit in the status register to see when the HC is finished. |
133 | */ |
134 | int xhci_halt(struct xhci_hcd *xhci) |
135 | { |
136 | int ret; |
137 | |
138 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_init, fmt: "// Halt the HC" ); |
139 | xhci_quiesce(xhci); |
140 | |
141 | ret = xhci_handshake(ptr: &xhci->op_regs->status, |
142 | STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); |
143 | if (ret) { |
144 | xhci_warn(xhci, "Host halt failed, %d\n" , ret); |
145 | return ret; |
146 | } |
147 | |
148 | xhci->xhc_state |= XHCI_STATE_HALTED; |
149 | xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; |
150 | |
151 | return ret; |
152 | } |
153 | |
154 | /* |
155 | * Set the run bit and wait for the host to be running. |
156 | */ |
157 | int xhci_start(struct xhci_hcd *xhci) |
158 | { |
159 | u32 temp; |
160 | int ret; |
161 | |
162 | temp = readl(addr: &xhci->op_regs->command); |
163 | temp |= (CMD_RUN); |
164 | xhci_dbg_trace(xhci, trace: trace_xhci_dbg_init, fmt: "// Turn on HC, cmd = 0x%x." , |
165 | temp); |
166 | writel(val: temp, addr: &xhci->op_regs->command); |
167 | |
168 | /* |
169 | * Wait for the HCHalted Status bit to be 0 to indicate the host is |
170 | * running. |
171 | */ |
172 | ret = xhci_handshake(ptr: &xhci->op_regs->status, |
173 | STS_HALT, done: 0, XHCI_MAX_HALT_USEC); |
174 | if (ret == -ETIMEDOUT) |
175 | xhci_err(xhci, "Host took too long to start, " |
176 | "waited %u microseconds.\n" , |
177 | XHCI_MAX_HALT_USEC); |
178 | if (!ret) { |
179 | /* clear state flags. Including dying, halted or removing */ |
180 | xhci->xhc_state = 0; |
181 | xhci->run_graceperiod = jiffies + msecs_to_jiffies(m: 500); |
182 | } |
183 | |
184 | return ret; |
185 | } |

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms
	 * after setting the CMD_RESET bit, and before accessing any
	 * HC registers. This allows the HC to complete the
	 * reset operation and be ready for HC register access.
	 * Without this delay, the subsequent HC register access
	 * may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
					 CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct iommu_domain *domain;
	int err, i;
	u64 val;
	u32 intrs;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
		      ARRAY_SIZE(xhci->run_regs->ir_set));

	for (i = 0; i < intrs; i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			     STS_FATAL, STS_FATAL,
			     XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

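/*
 * Set or clear the Interrupt Enable (IE) bit in an interrupter's IMAN
 * register; the host-wide CMD_EIE master enable is managed separately.
 */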
static int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->irq_pending);
	writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);

	return 0;
}

static int xhci_disable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);

	return 0;
}

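/*
 * Timer callback for the XHCI_COMP_MODE_QUIRK below: poll the USB3 root hub
 * ports and kick the USB core when a port is found stuck in compliance mode.
 */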
static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;
	hcd = rhub->hcd;

	if (!hcd)
		return;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance mode detected->port %d",
				       i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Attempting compliance mode recovery");

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
 * that causes ports behind that hardware to enter compliance mode sometimes.
 * The quirk creates a timer that polls every 2 seconds the link state of
 * each host controller's port and recovers it by issuing a Warm reset
 * if Compliance mode is detected, otherwise the port will become "dead" (no
 * device connections or disconnections will be detected anymore). Because no
 * status event is generated when entering compliance mode (per xhci spec),
 * this quirk is needed on systems that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
		msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		       "Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and Z1 Workstation
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
	    strstr(dmi_product_name, "Z620") ||
	    strstr(dmi_product_name, "Z820") ||
	    strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

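/* True once every USB3 root hub port has been observed in U0. */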
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupter;
	unsigned long flags;
	u32 temp;

	/*
	 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
	 * Protect the short window before host is running with a lock
	 */
	spin_lock_irqsave(&xhci->lock, flags);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	writel(temp, &xhci->op_regs->command);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
	xhci_enable_interrupter(ir);

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENODEV;
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupter;
	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	temp_64 &= ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "ERST deq = 64'h%0lx", (long unsigned int) temp_64);

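	/* The IMODI field is programmed in 250 ns increments (xhci spec 5.5.2.2) */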
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Set the interrupt modulation register");
	temp = readl(&ir->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(temp, &ir->ir_set->irq_control);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
						TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Finished %s for main hcd", __func__);

	xhci_create_dbc_dev(xhci);

	xhci_debugfs_init(xhci);

	if (xhci_has_one_roothub(xhci))
		return xhci_run_finished(xhci);

	set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupter;

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_remove_dbc_dev(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	spin_unlock_irq(&xhci->lock);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	xhci_disable_interrupter(ir);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_stop completed - status = %x",
		       readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
EXPORT_SYMBOL_GPL(xhci_stop);

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	/* Don't poll the roothubs after shutdown. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);

	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		del_timer_sync(&xhci->shared_hcd->rh_timer);
	}

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/*
	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
	 */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
	    xhci->quirks & XHCI_RESET_TO_DEFAULT)
		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);

	spin_unlock_irq(&xhci->lock);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_shutdown completed - status = %x",
		       readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
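/*
 * Save the operational registers and the primary interrupter's state so
 * xhci_restore_registers() can re-program them when resuming from S3/S4.
 */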
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupter;

	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);

	if (!ir)
		return;

	ir->s3_erst_size = readl(&ir->ir_set->erst_size);
	ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
	ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
	ir->s3_irq_control = readl(&ir->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupter;

	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(ir->s3_erst_size, &ir->ir_set->erst_size);
	xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
	xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
	writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
	writel(ir->s3_irq_control, &ir->ir_set->irq_control);
}

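/*
 * Re-program the hardware command ring dequeue pointer from the software
 * ring state, preserving the register's reserved bits and cycle state.
 */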
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		  (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Setting command ring address to 0x%llx",
		       (long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't reliably save the command ring pointer across suspend, so
 * we need to re-program it on resume. Unfortunately, the pointer must be
 * 64-byte aligned, because of the reserved bits in the command ring dequeue
 * pointer register. Therefore, we can't just set the dequeue pointer back in
 * the middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
		       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to 0U).
 * Internal wake causes immediate xHCI wake after suspend. The PORT_CSC write
 * done at enumeration clears this wake; force one here as well for
 * unconnected ports.
 */
static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
				       struct xhci_hub *rhub,
				       bool do_wakeup)
{
	unsigned long flags;
	u32 t1, t2, portsc;
	int i;

	spin_lock_irqsave(&xhci->lock, flags);

	for (i = 0; i < rhub->num_ports; i++) {
		portsc = readl(rhub->ports[i]->addr);
		t1 = xhci_port_state_to_neutral(portsc);
		t2 = t1;

		/* clear wake bits if do_wake is not set */
		if (!do_wakeup)
			t2 &= ~PORT_WAKE_BITS;

		/* Don't touch csc bit if connected or connect change is set */
		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
			t2 |= PORT_CSC;

		if (t1 != t2) {
			writel(t2, rhub->ports[i]->addr);
			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
				 rhub->hcd->self.busnum, i + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

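/*
 * Return true if any root hub port has a pending change bit set or is in the
 * resume state, i.e. a port event is on its way even if STS_EINT isn't set yet.
 */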
static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC * 2;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	u32 res;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
	    (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

	if (!HCD_HW_ACCESSIBLE(hcd))
		return 0;

	xhci_dbc_suspend(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		del_timer_sync(&xhci->shared_hcd->rh_timer);
	}

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			   STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	xhci->broken_suspend = 0;
	if (xhci_handshake(&xhci->op_regs->status,
			   STS_SAVE, 0, 20 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the
		 * SSS bit of USBSTS, so polling for BIT(8) to clear
		 * never succeeds and the driver wrongly assumes the
		 * controller is not responding and times out. To work
		 * around this, check that the SRE and HCE bits are not
		 * set (as per xhci Section 5.4.2) and bypass the timeout.
		 */
		res = readl(&xhci->op_regs->status);
		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
		    (((res & STS_SRE) == 0) &&
		     ((res & STS_HCE) == 0))) {
			xhci->broken_suspend = 1;
		} else {
			xhci_warn(xhci, "WARN: xHC save state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
{
	bool hibernated = (msg.event == PM_EVENT_RESTORE);
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	int retval = 0;
	bool comp_timer_running = false;
	bool pending_portevent = false;
	bool suspended_usb3_devs = false;
	bool reinit_xhc = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */

	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);

	if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
		reinit_xhc = true;

	if (!reinit_xhc) {
		/*
		 * Some controllers might lose power during suspend, so wait
		 * for controller not ready bit to clear, just as in xHC init.
		 */
		retval = xhci_handshake(&xhci->op_regs->status,
					STS_CNR, 0, 10 * 1000 * 1000);
		if (retval) {
			xhci_warn(xhci, "Controller not ready at resume %d\n",
				  retval);
			spin_unlock_irq(&xhci->lock);
			return retval;
		}
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state by setting the CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the controller
		 * restore, so set the timeout to 100 ms. The xHCI specification
		 * doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
				   STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}

	temp = readl(&xhci->op_regs->status);

	/* re-initialize the HC on Restore Error, or Host Controller Error */
	if ((temp & (STS_SRE | STS_HCE)) &&
	    !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
		reinit_xhc = true;
		if (!xhci->broken_suspend)
			xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
	}

	if (reinit_xhc) {
		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
		    !(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		if (xhci->shared_hcd)
			usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		xhci_disable_interrupter(xhci->interrupter);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			 readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd);
		if (!retval && xhci->shared_hcd) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(xhci->shared_hcd);
		}

		hcd->state = HC_STATE_SUSPENDED;
		if (xhci->shared_hcd)
			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		       0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

 done:
	if (retval == 0) {
		/*
		 * Resume roothubs only if there are pending events.
		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
		 * the first wake signalling failed, give it that chance if
		 * there are suspended USB 3 devices.
		 */
		if (xhci->usb3_rhub.bus_state.suspended_ports ||
		    xhci->usb3_rhub.bus_state.bus_suspended)
			suspended_usb3_devs = true;

		pending_portevent = xhci_pending_portevent(xhci);

		if (suspended_usb3_devs && !pending_portevent &&
		    msg.event == PM_EVENT_AUTO_RESUME) {
			msleep(120);
			pending_portevent = xhci_pending_portevent(xhci);
		}

		if (pending_portevent) {
			if (xhci->shared_hcd)
				usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}
	/*
	 * If the system is subject to the Quirk, the Compliance Mode Timer
	 * must always be re-initialized after a system resume: ports may
	 * suffer the Compliance Mode issue again, regardless of whether they
	 * entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	if (xhci->shared_hcd) {
		set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		usb_hcd_poll_rh_status(xhci->shared_hcd);
	}
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

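/*
 * Bounce an SG urb through a single temporary contiguous buffer, used when
 * an XHCI_SG_TRB_CACHE_SIZE_QUIRK host can't handle the scattered layout.
 */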
static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
	void *temp;
	int ret = 0;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf_len = urb->transfer_buffer_length;

	temp = kzalloc_node(buf_len, GFP_ATOMIC,
			    dev_to_node(hcd->self.sysdev));
	if (!temp)
		return -ENOMEM;

	if (usb_urb_dir_out(urb))
		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
				   temp, buf_len, 0);

	urb->transfer_buffer = temp;
	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   dir);

	if (dma_mapping_error(hcd->self.sysdev,
			      urb->transfer_dma)) {
		ret = -EAGAIN;
		kfree(temp);
	} else {
		urb->transfer_flags |= URB_DMA_MAP_SINGLE;
	}

	return ret;
}

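/*
 * Check if an SG urb needs the temp bounce buffer: true when the number of
 * SG entries exceeds what the host's TRB cache can hold and the trailing
 * entries can't be merged into full max-packet-size TRBs.
 */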
static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
					  struct urb *urb)
{
	bool ret = false;
	unsigned int i;
	unsigned int len = 0;
	unsigned int trb_size;
	unsigned int max_pkt;
	struct scatterlist *sg;
	struct scatterlist *tail_sg;

	tail_sg = urb->sg;
	max_pkt = usb_endpoint_maxp(&urb->ep->desc);

	if (!urb->num_sgs)
		return ret;

	if (urb->dev->speed >= USB_SPEED_SUPER)
		trb_size = TRB_CACHE_SIZE_SS;
	else
		trb_size = TRB_CACHE_SIZE_HS;

	if (urb->transfer_buffer_length != 0 &&
	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			len = len + sg->length;
			if (i > trb_size - 2) {
				len = len - tail_sg->length;
				if (len < max_pkt) {
					ret = true;
					break;
				}

				tail_sg = sg_next(tail_sg);
			}
		}
	}
	return ret;
}

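/*
 * Undo xhci_map_temp_buffer(): unmap the bounce buffer, copy IN data back to
 * the urb's SG list, and free the temporary allocation.
 */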
static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
	unsigned int len;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_len = urb->transfer_buffer_length;

	if (IS_ENABLED(CONFIG_HAS_DMA) &&
	    (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		dma_unmap_single(hcd->self.sysdev,
				 urb->transfer_dma,
				 urb->transfer_buffer_length,
				 dir);

	if (usb_urb_dir_in(urb)) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
					   urb->transfer_buffer,
					   buf_len,
					   0);
		if (len != buf_len) {
			xhci_dbg(hcd_to_xhci(hcd),
				 "Copy from tmp buf to urb sg list failed\n");
			urb->actual_length = len;
		}
	}
	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;
}

/*
 * Bypass the DMA mapping if URB is suitable for Immediate Data Transfer (IDT):
 * we'll copy the actual data into the TRB address register. This is limited to
 * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
 * >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed.
 */
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);

	if (xhci_urb_suitable_for_idt(urb))
		return 0;

	if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
		if (xhci_urb_temp_buffer_required(hcd, urb))
			return xhci_map_temp_buffer(hcd, urb);
	}
	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct xhci_hcd *xhci;
	bool unmap_temp_buf = false;

	xhci = hcd_to_xhci(hcd);

	if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		unmap_temp_buf = true;

	if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
		xhci_unmap_temp_buf(hcd, urb);
	else
		usb_hcd_unmap_urb_for_dma(hcd, urb);
}

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
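 *
 * Example: ep 2 OUT (0x02) -> index 3; ep 2 IN (0x82) -> index 4;
 * ep 0 (control) -> index 0.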
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc) * 2);
	else
		index = (unsigned int) (usb_endpoint_num(desc) * 2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);

/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the xHCI endpoint index.
 */
static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
			   struct usb_host_endpoint *ep, int check_ep,
			   bool check_virt_dev, const char *func)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
				 func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev that does not match virt_dev\n",
				 func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
				unsigned int ep_index, struct urb *urb,
				gfp_t mem_flags)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max packet size in usb_device = %d",
			       max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max packet size in xHCI HW = %d",
			       hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, true, mem_flags);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				  __func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				   xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);	/* must clear */
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
					      true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}
1510 | |
1511 | /* |
1512 | * non-error returns are a promise to giveback() the urb later |
1513 | * we drop ownership so next owner (or urb unlink) can get it |
1514 | */ |
1515 | static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) |
1516 | { |
1517 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
1518 | unsigned long flags; |
1519 | int ret = 0; |
1520 | unsigned int slot_id, ep_index; |
1521 | unsigned int *ep_state; |
1522 | struct urb_priv *urb_priv; |
1523 | int num_tds; |
1524 | |
1525 | if (!urb) |
1526 | return -EINVAL; |
1527 | ret = xhci_check_args(hcd, udev: urb->dev, ep: urb->ep, |
1528 | check_ep: true, check_virt_dev: true, func: __func__); |
1529 | if (ret <= 0) |
1530 | return ret ? ret : -EINVAL; |
1531 | |
1532 | slot_id = urb->dev->slot_id; |
1533 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
1534 | ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; |
1535 | |
1536 | if (!HCD_HW_ACCESSIBLE(hcd)) |
1537 | return -ESHUTDOWN; |
1538 | |
1539 | if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { |
1540 | xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n" ); |
1541 | return -ENODEV; |
1542 | } |
1543 | |
	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		num_tds = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		num_tds = 2;
	else
		num_tds = 1;
1553 | |
	urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
1555 | if (!urb_priv) |
1556 | return -ENOMEM; |
1557 | |
1558 | urb_priv->num_tds = num_tds; |
1559 | urb_priv->num_tds_done = 0; |
1560 | urb->hcpriv = urb_priv; |
1561 | |
1562 | trace_xhci_urb_enqueue(urb); |
1563 | |
	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1565 | /* Check to see if the max packet size for the default control |
1566 | * endpoint changed during FS device enumeration |
1567 | */ |
1568 | if (urb->dev->speed == USB_SPEED_FULL) { |
1569 | ret = xhci_check_maxpacket(xhci, slot_id, |
1570 | ep_index, urb, mem_flags); |
1571 | if (ret < 0) { |
1572 | xhci_urb_free_priv(urb_priv); |
1573 | urb->hcpriv = NULL; |
1574 | return ret; |
1575 | } |
1576 | } |
1577 | } |
1578 | |
1579 | spin_lock_irqsave(&xhci->lock, flags); |
1580 | |
1581 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1583 | urb->ep->desc.bEndpointAddress, urb); |
1584 | ret = -ESHUTDOWN; |
1585 | goto free_priv; |
1586 | } |
1587 | if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) { |
		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1589 | *ep_state); |
1590 | ret = -EINVAL; |
1591 | goto free_priv; |
1592 | } |
1593 | if (*ep_state & EP_SOFT_CLEAR_TOGGLE) { |
		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1595 | ret = -EINVAL; |
1596 | goto free_priv; |
1597 | } |
1598 | |
	switch (usb_endpoint_type(&urb->ep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
1602 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, |
1603 | slot_id, ep_index); |
1604 | break; |
1605 | case USB_ENDPOINT_XFER_BULK: |
1606 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, |
1607 | slot_id, ep_index); |
1608 | break; |
1609 | case USB_ENDPOINT_XFER_INT: |
1610 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, |
1611 | slot_id, ep_index); |
1612 | break; |
1613 | case USB_ENDPOINT_XFER_ISOC: |
1614 | ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, |
1615 | slot_id, ep_index); |
1616 | } |
1617 | |
1618 | if (ret) { |
1619 | free_priv: |
1620 | xhci_urb_free_priv(urb_priv); |
1621 | urb->hcpriv = NULL; |
1622 | } |
	spin_unlock_irqrestore(&xhci->lock, flags);
1624 | return ret; |
1625 | } |
1626 | |
1627 | /* |
1628 | * Remove the URB's TD from the endpoint ring. This may cause the HC to stop |
1629 | * USB transfers, potentially stopping in the middle of a TRB buffer. The HC |
1630 | * should pick up where it left off in the TD, unless a Set Transfer Ring |
1631 | * Dequeue Pointer is issued. |
1632 | * |
1633 | * The TRBs that make up the buffers for the canceled URB will be "removed" from |
1634 | * the ring. Since the ring is a contiguous structure, they can't be physically |
 * removed. Instead, there are three cases to handle:
1636 | * |
1637 | * 1) If the HC is in the middle of processing the URB to be canceled, we |
1638 | * simply move the ring's dequeue pointer past those TRBs using the Set |
1639 | * Transfer Ring Dequeue Pointer command. This will be the common case, |
 *    when drivers time out on the last submitted URB and attempt to cancel.
1641 | * |
1642 | * 2) If the HC is in the middle of a different TD, we turn the TRBs into a |
1643 | * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The |
 *    HC will need to invalidate any TRBs it has cached after the stop
1645 | * endpoint command, as noted in the xHCI 0.95 errata. |
1646 | * |
1647 | * 3) The TD may have completed by the time the Stop Endpoint Command |
1648 | * completes, so software needs to handle that case too. |
1649 | * |
1650 | * This function should protect against the TD enqueueing code ringing the |
1651 | * doorbell while this code is waiting for a Stop Endpoint command to complete. |
 * It also needs to account for multiple cancellations happening at the same
1653 | * time for the same endpoint. |
1654 | * |
1655 | * Note that this function can be called in any context, or so says |
 * usb_hcd_unlink_urb().
1657 | */ |
1658 | static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) |
1659 | { |
1660 | unsigned long flags; |
1661 | int ret, i; |
1662 | u32 temp; |
1663 | struct xhci_hcd *xhci; |
1664 | struct urb_priv *urb_priv; |
1665 | struct xhci_td *td; |
1666 | unsigned int ep_index; |
1667 | struct xhci_ring *ep_ring; |
1668 | struct xhci_virt_ep *ep; |
1669 | struct xhci_command *command; |
1670 | struct xhci_virt_device *vdev; |
1671 | |
1672 | xhci = hcd_to_xhci(hcd); |
1673 | spin_lock_irqsave(&xhci->lock, flags); |
1674 | |
1675 | trace_xhci_urb_dequeue(urb); |
1676 | |
1677 | /* Make sure the URB hasn't completed or been unlinked already */ |
1678 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); |
1679 | if (ret) |
1680 | goto done; |
1681 | |
1682 | /* give back URB now if we can't queue it for cancel */ |
1683 | vdev = xhci->devs[urb->dev->slot_id]; |
1684 | urb_priv = urb->hcpriv; |
1685 | if (!vdev || !urb_priv) |
1686 | goto err_giveback; |
1687 | |
1688 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
1689 | ep = &vdev->eps[ep_index]; |
1690 | ep_ring = xhci_urb_to_transfer_ring(xhci, urb); |
1691 | if (!ep || !ep_ring) |
1692 | goto err_giveback; |
1693 | |
1694 | /* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */ |
	temp = readl(&xhci->op_regs->status);
1696 | if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) { |
1697 | xhci_hc_died(xhci); |
1698 | goto done; |
1699 | } |
1700 | |
1701 | /* |
1702 | * check ring is not re-allocated since URB was enqueued. If it is, then |
1703 | * make sure none of the ring related pointers in this URB private data |
1704 | * are touched, such as td_list, otherwise we overwrite freed data |
1705 | */ |
	if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
		xhci_err(xhci, "Canceled URB td not found on endpoint ring");
		for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
1712 | } |
1713 | goto err_giveback; |
1714 | } |
1715 | |
1716 | if (xhci->xhc_state & XHCI_STATE_HALTED) { |
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HC halted, freeing TD manually.");
		for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
1728 | goto err_giveback; |
1729 | } |
1730 | |
1731 | i = urb_priv->num_tds_done; |
1732 | if (i < urb_priv->num_tds) |
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i].start_seg,
					urb_priv->td[i].first_trb));
1741 | |
1742 | for (; i < urb_priv->num_tds; i++) { |
1743 | td = &urb_priv->td[i]; |
1744 | /* TD can already be on cancelled list if ep halted on it */ |
		if (list_empty(&td->cancelled_td_list)) {
			td->cancel_status = TD_DIRTY;
			list_add_tail(&td->cancelled_td_list,
				      &ep->cancelled_td_list);
1749 | } |
1750 | } |
1751 | |
1752 | /* Queue a stop endpoint command, but only if this is |
1753 | * the first cancellation to be handled. |
1754 | */ |
1755 | if (!(ep->ep_state & EP_STOP_CMD_PENDING)) { |
		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1757 | if (!command) { |
1758 | ret = -ENOMEM; |
1759 | goto done; |
1760 | } |
1761 | ep->ep_state |= EP_STOP_CMD_PENDING; |
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
					 ep_index, 0);
1764 | xhci_ring_cmd_db(xhci); |
1765 | } |
1766 | done: |
	spin_unlock_irqrestore(&xhci->lock, flags);
1768 | return ret; |
1769 | |
1770 | err_giveback: |
1771 | if (urb_priv) |
1772 | xhci_urb_free_priv(urb_priv); |
1773 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
	spin_unlock_irqrestore(&xhci->lock, flags);
	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1776 | return ret; |
1777 | } |
1778 | |
1779 | /* Drop an endpoint from a new bandwidth configuration for this device. |
1780 | * Only one call to this function is allowed per endpoint before |
1781 | * check_bandwidth() or reset_bandwidth() must be called. |
1782 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
1783 | * add the endpoint to the schedule with possibly new parameters denoted by a |
1784 | * different endpoint descriptor in usb_host_endpoint. |
1785 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
1786 | * not allowed. |
1787 | * |
1788 | * The USB core will not allow URBs to be queued to an endpoint that is being |
1789 | * disabled, so there's no need for mutual exclusion to protect |
1790 | * the xhci->devs[slot_id] structure. |
1791 | */ |
1792 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
1793 | struct usb_host_endpoint *ep) |
1794 | { |
1795 | struct xhci_hcd *xhci; |
1796 | struct xhci_container_ctx *in_ctx, *out_ctx; |
1797 | struct xhci_input_control_ctx *ctrl_ctx; |
1798 | unsigned int ep_index; |
1799 | struct xhci_ep_ctx *ep_ctx; |
1800 | u32 drop_flag; |
1801 | u32 new_add_flags, new_drop_flags; |
1802 | int ret; |
1803 | |
	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1805 | if (ret <= 0) |
1806 | return ret; |
1807 | xhci = hcd_to_xhci(hcd); |
1808 | if (xhci->xhc_state & XHCI_STATE_DYING) |
1809 | return -ENODEV; |
1810 | |
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
1813 | if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { |
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1815 | __func__, drop_flag); |
1816 | return 0; |
1817 | } |
1818 | |
1819 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
1820 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; |
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1824 | __func__); |
1825 | return 0; |
1826 | } |
1827 | |
1828 | ep_index = xhci_get_endpoint_index(&ep->desc); |
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1830 | /* If the HC already knows the endpoint is disabled, |
1831 | * or the HCD has noted it is disabled, ignore this request |
1832 | */ |
1833 | if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) || |
1834 | le32_to_cpu(ctrl_ctx->drop_flags) & |
	    xhci_get_endpoint_flag(&ep->desc)) {
1836 | /* Do not warn when called after a usb_device_reset */ |
1837 | if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) |
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1839 | __func__, ep); |
1840 | return 0; |
1841 | } |
1842 | |
1843 | ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); |
1844 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
1845 | |
1846 | ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); |
1847 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
1848 | |
	xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1852 | |
	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1854 | (unsigned int) ep->desc.bEndpointAddress, |
1855 | udev->slot_id, |
1856 | (unsigned int) new_drop_flags, |
1857 | (unsigned int) new_add_flags); |
1858 | return 0; |
1859 | } |
1860 | EXPORT_SYMBOL_GPL(xhci_drop_endpoint); |
1861 | |
1862 | /* Add an endpoint to a new possible bandwidth configuration for this device. |
1863 | * Only one call to this function is allowed per endpoint before |
1864 | * check_bandwidth() or reset_bandwidth() must be called. |
1865 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
1866 | * add the endpoint to the schedule with possibly new parameters denoted by a |
1867 | * different endpoint descriptor in usb_host_endpoint. |
1868 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
1869 | * not allowed. |
1870 | * |
1871 | * The USB core will not allow URBs to be queued to an endpoint until the |
1872 | * configuration or alt setting is installed in the device, so there's no need |
1873 | * for mutual exclusion to protect the xhci->devs[slot_id] structure. |
1874 | */ |
1875 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
1876 | struct usb_host_endpoint *ep) |
1877 | { |
1878 | struct xhci_hcd *xhci; |
1879 | struct xhci_container_ctx *in_ctx; |
1880 | unsigned int ep_index; |
1881 | struct xhci_input_control_ctx *ctrl_ctx; |
1882 | struct xhci_ep_ctx *ep_ctx; |
1883 | u32 added_ctxs; |
1884 | u32 new_add_flags, new_drop_flags; |
1885 | struct xhci_virt_device *virt_dev; |
1886 | int ret = 0; |
1887 | |
	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1889 | if (ret <= 0) { |
1890 | /* So we won't queue a reset ep command for a root hub */ |
1891 | ep->hcpriv = NULL; |
1892 | return ret; |
1893 | } |
1894 | xhci = hcd_to_xhci(hcd); |
1895 | if (xhci->xhc_state & XHCI_STATE_DYING) |
1896 | return -ENODEV; |
1897 | |
	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1899 | if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { |
1900 | /* FIXME when we have to issue an evaluate endpoint command to |
1901 | * deal with ep0 max packet size changing once we get the |
1902 | * descriptors |
1903 | */ |
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1905 | __func__, added_ctxs); |
1906 | return 0; |
1907 | } |
1908 | |
1909 | virt_dev = xhci->devs[udev->slot_id]; |
1910 | in_ctx = virt_dev->in_ctx; |
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1914 | __func__); |
1915 | return 0; |
1916 | } |
1917 | |
1918 | ep_index = xhci_get_endpoint_index(&ep->desc); |
1919 | /* If this endpoint is already in use, and the upper layers are trying |
1920 | * to add it again without dropping it, reject the addition. |
1921 | */ |
1922 | if (virt_dev->eps[ep_index].ring && |
1923 | !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { |
		xhci_warn(xhci, "Trying to add endpoint 0x%x without dropping it.\n",
			  (unsigned int) ep->desc.bEndpointAddress);
1927 | return -EINVAL; |
1928 | } |
1929 | |
1930 | /* If the HCD has already noted the endpoint is enabled, |
1931 | * ignore this request. |
1932 | */ |
1933 | if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) { |
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1935 | __func__, ep); |
1936 | return 0; |
1937 | } |
1938 | |
1939 | /* |
1940 | * Configuration and alternate setting changes must be done in |
	 * process context, not interrupt context (or so the documentation
	 * for usb_set_interface() and usb_set_configuration() claims).
1943 | */ |
1944 | if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { |
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1946 | __func__, ep->desc.bEndpointAddress); |
1947 | return -ENOMEM; |
1948 | } |
1949 | |
1950 | ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); |
1951 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
1952 | |
1953 | /* If xhci_endpoint_disable() was called for this endpoint, but the |
1954 | * xHC hasn't been notified yet through the check_bandwidth() call, |
1955 | * this re-adds a new state for the endpoint from the new endpoint |
1956 | * descriptors. We must drop and re-add this endpoint, so we leave the |
1957 | * drop flags alone. |
1958 | */ |
1959 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
1960 | |
1961 | /* Store the usb_device pointer for later use */ |
1962 | ep->hcpriv = udev; |
1963 | |
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
	trace_xhci_add_endpoint(ep_ctx);
1966 | |
	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1968 | (unsigned int) ep->desc.bEndpointAddress, |
1969 | udev->slot_id, |
1970 | (unsigned int) new_drop_flags, |
1971 | (unsigned int) new_add_flags); |
1972 | return 0; |
1973 | } |
1974 | EXPORT_SYMBOL_GPL(xhci_add_endpoint); |
1975 | |
1976 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
1977 | { |
1978 | struct xhci_input_control_ctx *ctrl_ctx; |
1979 | struct xhci_ep_ctx *ep_ctx; |
1980 | struct xhci_slot_ctx *slot_ctx; |
1981 | int i; |
1982 | |
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1986 | __func__); |
1987 | return; |
1988 | } |
1989 | |
1990 | /* When a device's add flag and drop flag are zero, any subsequent |
1991 | * configure endpoint command will leave that endpoint's state |
1992 | * untouched. Make sure we don't leave any old state in the input |
1993 | * endpoint contexts. |
1994 | */ |
1995 | ctrl_ctx->drop_flags = 0; |
1996 | ctrl_ctx->add_flags = 0; |
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1998 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
1999 | /* Endpoint 0 is always valid */ |
2000 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); |
2001 | for (i = 1; i < 31; i++) { |
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2003 | ep_ctx->ep_info = 0; |
2004 | ep_ctx->ep_info2 = 0; |
2005 | ep_ctx->deq = 0; |
2006 | ep_ctx->tx_info = 0; |
2007 | } |
2008 | } |
2009 | |
2010 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, |
2011 | struct usb_device *udev, u32 *cmd_status) |
2012 | { |
2013 | int ret; |
2014 | |
2015 | switch (*cmd_status) { |
2016 | case COMP_COMMAND_ABORTED: |
2017 | case COMP_COMMAND_RING_STOPPED: |
		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2019 | ret = -ETIME; |
2020 | break; |
2021 | case COMP_RESOURCE_ERROR: |
		dev_warn(&udev->dev,
			 "Not enough host controller resources for new device state.\n");
2024 | ret = -ENOMEM; |
2025 | /* FIXME: can we allocate more resources for the HC? */ |
2026 | break; |
2027 | case COMP_BANDWIDTH_ERROR: |
2028 | case COMP_SECONDARY_BANDWIDTH_ERROR: |
		dev_warn(&udev->dev,
			 "Not enough bandwidth for new device state.\n");
2031 | ret = -ENOSPC; |
2032 | /* FIXME: can we go back to the old state? */ |
2033 | break; |
2034 | case COMP_TRB_ERROR: |
2035 | /* the HCD set up something wrong */ |
		dev_warn(&udev->dev,
			 "ERROR: Endpoint drop flag = 0, add flag = 1, and endpoint is not disabled.\n");
2039 | ret = -EINVAL; |
2040 | break; |
2041 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for endpoint configure command.\n");
2044 | ret = -ENODEV; |
2045 | break; |
2046 | case COMP_SUCCESS: |
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful Endpoint Configure command");
2049 | ret = 0; |
2050 | break; |
2051 | default: |
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2053 | *cmd_status); |
2054 | ret = -EINVAL; |
2055 | break; |
2056 | } |
2057 | return ret; |
2058 | } |
2059 | |
2060 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, |
2061 | struct usb_device *udev, u32 *cmd_status) |
2062 | { |
2063 | int ret; |
2064 | |
2065 | switch (*cmd_status) { |
2066 | case COMP_COMMAND_ABORTED: |
2067 | case COMP_COMMAND_RING_STOPPED: |
		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2069 | ret = -ETIME; |
2070 | break; |
2071 | case COMP_PARAMETER_ERROR: |
		dev_warn(&udev->dev,
			 "WARN: xHCI driver setup invalid evaluate context command.\n");
2074 | ret = -EINVAL; |
2075 | break; |
2076 | case COMP_SLOT_NOT_ENABLED_ERROR: |
		dev_warn(&udev->dev,
			 "WARN: slot not enabled for evaluate context command.\n");
2079 | ret = -EINVAL; |
2080 | break; |
2081 | case COMP_CONTEXT_STATE_ERROR: |
		dev_warn(&udev->dev,
			 "WARN: invalid context state for evaluate context command.\n");
2084 | ret = -EINVAL; |
2085 | break; |
2086 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for evaluate context command.\n");
2089 | ret = -ENODEV; |
2090 | break; |
2091 | case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: |
2092 | /* Max Exit Latency too large error */ |
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2094 | ret = -EINVAL; |
2095 | break; |
2096 | case COMP_SUCCESS: |
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful evaluate context command");
2099 | ret = 0; |
2100 | break; |
2101 | default: |
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2103 | *cmd_status); |
2104 | ret = -EINVAL; |
2105 | break; |
2106 | } |
2107 | return ret; |
2108 | } |
2109 | |
2110 | static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, |
2111 | struct xhci_input_control_ctx *ctrl_ctx) |
2112 | { |
2113 | u32 valid_add_flags; |
2114 | u32 valid_drop_flags; |
2115 | |
2116 | /* Ignore the slot flag (bit 0), and the default control endpoint flag |
2117 | * (bit 1). The default control endpoint is added during the Address |
2118 | * Device command and is never removed until the slot is disabled. |
2119 | */ |
2120 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
2121 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
2122 | |
2123 | /* Use hweight32 to count the number of ones in the add flags, or |
2124 | * number of endpoints added. Don't count endpoints that are changed |
2125 | * (both added and dropped). |
2126 | */ |
2127 | return hweight32(valid_add_flags) - |
2128 | hweight32(valid_add_flags & valid_drop_flags); |
2129 | } |
2130 | |
2131 | static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, |
2132 | struct xhci_input_control_ctx *ctrl_ctx) |
2133 | { |
2134 | u32 valid_add_flags; |
2135 | u32 valid_drop_flags; |
2136 | |
2137 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
2138 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
2139 | |
2140 | return hweight32(valid_drop_flags) - |
2141 | hweight32(valid_add_flags & valid_drop_flags); |
2142 | } |
2143 | |
2144 | /* |
2145 | * We need to reserve the new number of endpoints before the configure endpoint |
2146 | * command completes. We can't subtract the dropped endpoints from the number |
2147 | * of active endpoints until the command completes because we can oversubscribe |
2148 | * the host in this case: |
2149 | * |
2150 | * - the first configure endpoint command drops more endpoints than it adds |
2151 | * - a second configure endpoint command that adds more endpoints is queued |
2152 | * - the first configure endpoint command fails, so the config is unchanged |
 * - the second command may succeed even though there aren't enough resources
2154 | * |
2155 | * Must be called with xhci->lock held. |
2156 | */ |
2157 | static int xhci_reserve_host_resources(struct xhci_hcd *xhci, |
2158 | struct xhci_input_control_ctx *ctrl_ctx) |
2159 | { |
2160 | u32 added_eps; |
2161 | |
2162 | added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); |
2163 | if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { |
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: %u active, need to add %u, limit is %u.",
2167 | xhci->num_active_eps, added_eps, |
2168 | xhci->limit_active_eps); |
2169 | return -ENOMEM; |
2170 | } |
2171 | xhci->num_active_eps += added_eps; |
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding %u ep ctxs, %u now active.", added_eps,
2174 | xhci->num_active_eps); |
2175 | return 0; |
2176 | } |
2177 | |
2178 | /* |
 * The configure endpoint command failed in the xHC for some reason, so we
 * need to revert the resources that the failed configuration would have used.
2181 | * |
2182 | * Must be called with xhci->lock held. |
2183 | */ |
2184 | static void xhci_free_host_resources(struct xhci_hcd *xhci, |
2185 | struct xhci_input_control_ctx *ctrl_ctx) |
2186 | { |
2187 | u32 num_failed_eps; |
2188 | |
2189 | num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); |
2190 | xhci->num_active_eps -= num_failed_eps; |
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Removing %u failed ep ctxs, %u now active.",
2193 | num_failed_eps, |
2194 | xhci->num_active_eps); |
2195 | } |
2196 | |
2197 | /* |
2198 | * Now that the command has completed, clean up the active endpoint count by |
2199 | * subtracting out the endpoints that were dropped (but not changed). |
2200 | * |
2201 | * Must be called with xhci->lock held. |
2202 | */ |
2203 | static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, |
2204 | struct xhci_input_control_ctx *ctrl_ctx) |
2205 | { |
2206 | u32 num_dropped_eps; |
2207 | |
2208 | num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); |
2209 | xhci->num_active_eps -= num_dropped_eps; |
2210 | if (num_dropped_eps) |
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Removing %u dropped ep ctxs, %u now active.",
2213 | num_dropped_eps, |
2214 | xhci->num_active_eps); |
2215 | } |
2216 | |
2217 | static unsigned int xhci_get_block_size(struct usb_device *udev) |
2218 | { |
2219 | switch (udev->speed) { |
2220 | case USB_SPEED_LOW: |
2221 | case USB_SPEED_FULL: |
2222 | return FS_BLOCK; |
2223 | case USB_SPEED_HIGH: |
2224 | return HS_BLOCK; |
2225 | case USB_SPEED_SUPER: |
2226 | case USB_SPEED_SUPER_PLUS: |
2227 | return SS_BLOCK; |
2228 | case USB_SPEED_UNKNOWN: |
2229 | default: |
2230 | /* Should never happen */ |
2231 | return 1; |
2232 | } |
2233 | } |
2234 | |
2235 | static unsigned int |
2236 | xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) |
2237 | { |
2238 | if (interval_bw->overhead[LS_OVERHEAD_TYPE]) |
2239 | return LS_OVERHEAD; |
2240 | if (interval_bw->overhead[FS_OVERHEAD_TYPE]) |
2241 | return FS_OVERHEAD; |
2242 | return HS_OVERHEAD; |
2243 | } |
2244 | |
2245 | /* If we are changing a LS/FS device under a HS hub, |
2246 | * make sure (if we are activating a new TT) that the HS bus has enough |
2247 | * bandwidth for this new TT. |
2248 | */ |
2249 | static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, |
2250 | struct xhci_virt_device *virt_dev, |
2251 | int old_active_eps) |
2252 | { |
2253 | struct xhci_interval_bw_table *bw_table; |
2254 | struct xhci_tt_bw_info *tt_info; |
2255 | |
2256 | /* Find the bandwidth table for the root port this TT is attached to. */ |
2257 | bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; |
2258 | tt_info = virt_dev->tt_info; |
2259 | /* If this TT already had active endpoints, the bandwidth for this TT |
2260 | * has already been added. Removing all periodic endpoints (and thus |
 * making the TT inactive) will only decrease the bandwidth used.
2262 | */ |
2263 | if (old_active_eps) |
2264 | return 0; |
2265 | if (old_active_eps == 0 && tt_info->active_eps != 0) { |
2266 | if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) |
2267 | return -ENOMEM; |
2268 | return 0; |
2269 | } |
2270 | /* Not sure why we would have no new active endpoints... |
2271 | * |
2272 | * Maybe because of an Evaluate Context change for a hub update or a |
2273 | * control endpoint 0 max packet size change? |
2274 | * FIXME: skip the bandwidth calculation in that case. |
2275 | */ |
2276 | return 0; |
2277 | } |
2278 | |
2279 | static int xhci_check_ss_bw(struct xhci_hcd *xhci, |
2280 | struct xhci_virt_device *virt_dev) |
2281 | { |
2282 | unsigned int bw_reserved; |
2283 | |
2284 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); |
2285 | if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) |
2286 | return -ENOMEM; |
2287 | |
2288 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); |
2289 | if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) |
2290 | return -ENOMEM; |
2291 | |
2292 | return 0; |
2293 | } |
2294 | |
2295 | /* |
2296 | * This algorithm is a very conservative estimate of the worst-case scheduling |
2297 | * scenario for any one interval. The hardware dynamically schedules the |
2298 | * packets, so we can't tell which microframe could be the limiting factor in |
2299 | * the bandwidth scheduling. This only takes into account periodic endpoints. |
2300 | * |
 * Obviously, we can't solve an NP-complete problem to find the minimum worst
2302 | * case scenario. Instead, we come up with an estimate that is no less than |
2303 | * the worst case bandwidth used for any one microframe, but may be an |
2304 | * over-estimate. |
2305 | * |
2306 | * We walk the requirements for each endpoint by interval, starting with the |
2307 | * smallest interval, and place packets in the schedule where there is only one |
2308 | * possible way to schedule packets for that interval. In order to simplify |
2309 | * this algorithm, we record the largest max packet size for each interval, and |
2310 | * assume all packets will be that size. |
2311 | * |
2312 | * For interval 0, we obviously must schedule all packets for each interval. |
2313 | * The bandwidth for interval 0 is just the amount of data to be transmitted |
2314 | * (the sum of all max ESIT payload sizes, plus any overhead per packet times |
2315 | * the number of packets). |
2316 | * |
2317 | * For interval 1, we have two possible microframes to schedule those packets |
2318 | * in. For this algorithm, if we can schedule the same number of packets for |
2319 | * each possible scheduling opportunity (each microframe), we will do so. The |
2320 | * remaining number of packets will be saved to be transmitted in the gaps in |
2321 | * the next interval's scheduling sequence. |
2322 | * |
2323 | * As we move those remaining packets to be scheduled with interval 2 packets, |
2324 | * we have to double the number of remaining packets to transmit. This is |
2325 | * because the intervals are actually powers of 2, and we would be transmitting |
2326 | * the previous interval's packets twice in this interval. We also have to be |
2327 | * sure that when we look at the largest max packet size for this interval, we |
2328 | * also look at the largest max packet size for the remaining packets and take |
2329 | * the greater of the two. |
2330 | * |
2331 | * The algorithm continues to evenly distribute packets in each scheduling |
2332 | * opportunity, and push the remaining packets out, until we get to the last |
2333 | * interval. Then those packets and their associated overhead are just added |
2334 | * to the bandwidth used. |
2335 | */ |
2336 | static int xhci_check_bw_table(struct xhci_hcd *xhci, |
2337 | struct xhci_virt_device *virt_dev, |
2338 | int old_active_eps) |
2339 | { |
2340 | unsigned int bw_reserved; |
2341 | unsigned int max_bandwidth; |
2342 | unsigned int bw_used; |
2343 | unsigned int block_size; |
2344 | struct xhci_interval_bw_table *bw_table; |
2345 | unsigned int packet_size = 0; |
2346 | unsigned int overhead = 0; |
2347 | unsigned int packets_transmitted = 0; |
2348 | unsigned int packets_remaining = 0; |
2349 | unsigned int i; |
2350 | |
2351 | if (virt_dev->udev->speed >= USB_SPEED_SUPER) |
2352 | return xhci_check_ss_bw(xhci, virt_dev); |
2353 | |
2354 | if (virt_dev->udev->speed == USB_SPEED_HIGH) { |
2355 | max_bandwidth = HS_BW_LIMIT; |
2356 | /* Convert percent of bus BW reserved to blocks reserved */ |
2357 | bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); |
2358 | } else { |
2359 | max_bandwidth = FS_BW_LIMIT; |
2360 | bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); |
2361 | } |
2362 | |
2363 | bw_table = virt_dev->bw_table; |
2364 | /* We need to translate the max packet size and max ESIT payloads into |
2365 | * the units the hardware uses. |
2366 | */ |
	block_size = xhci_get_block_size(virt_dev->udev);
2368 | |
	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
2371 | */ |
2372 | if (virt_dev->tt_info) { |
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for newly activated TT.\n");
2379 | return -ENOMEM; |
2380 | } |
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for TT slot %u port %u",
2383 | virt_dev->tt_info->slot_id, |
2384 | virt_dev->tt_info->ttport); |
2385 | } else { |
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
2388 | virt_dev->real_port); |
2389 | } |
2390 | |
2391 | /* Add in how much bandwidth will be used for interval zero, or the |
2392 | * rounded max ESIT payload + number of packets * largest overhead. |
2393 | */ |
2394 | bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + |
2395 | bw_table->interval_bw[0].num_packets * |
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2397 | |
2398 | for (i = 1; i < XHCI_MAX_INTERVAL; i++) { |
2399 | unsigned int bw_added; |
2400 | unsigned int largest_mps; |
2401 | unsigned int interval_overhead; |
2402 | |
2403 | /* |
2404 | * How many packets could we transmit in this interval? |
2405 | * If packets didn't fit in the previous interval, we will need |
2406 | * to transmit that many packets twice within this interval. |
2407 | */ |
2408 | packets_remaining = 2 * packets_remaining + |
2409 | bw_table->interval_bw[i].num_packets; |
2410 | |
2411 | /* Find the largest max packet size of this or the previous |
2412 | * interval. |
2413 | */ |
		if (list_empty(&bw_table->interval_bw[i].endpoints))
2415 | largest_mps = 0; |
2416 | else { |
2417 | struct xhci_virt_ep *virt_ep; |
2418 | struct list_head *ep_entry; |
2419 | |
2420 | ep_entry = bw_table->interval_bw[i].endpoints.next; |
2421 | virt_ep = list_entry(ep_entry, |
2422 | struct xhci_virt_ep, bw_endpoint_list); |
2423 | /* Convert to blocks, rounding up */ |
2424 | largest_mps = DIV_ROUND_UP( |
2425 | virt_ep->bw_info.max_packet_size, |
2426 | block_size); |
2427 | } |
2428 | if (largest_mps > packet_size) |
2429 | packet_size = largest_mps; |
2430 | |
2431 | /* Use the larger overhead of this or the previous interval. */ |
		interval_overhead = xhci_get_largest_overhead(
						&bw_table->interval_bw[i]);
2434 | if (interval_overhead > overhead) |
2435 | overhead = interval_overhead; |
2436 | |
2437 | /* How many packets can we evenly distribute across |
2438 | * (1 << (i + 1)) possible scheduling opportunities? |
2439 | */ |
2440 | packets_transmitted = packets_remaining >> (i + 1); |
2441 | |
2442 | /* Add in the bandwidth used for those scheduled packets */ |
2443 | bw_added = packets_transmitted * (overhead + packet_size); |
2444 | |
2445 | /* How many packets do we have remaining to transmit? */ |
2446 | packets_remaining = packets_remaining % (1 << (i + 1)); |
2447 | |
2448 | /* What largest max packet size should those packets have? */ |
2449 | /* If we've transmitted all packets, don't carry over the |
2450 | * largest packet size. |
2451 | */ |
2452 | if (packets_remaining == 0) { |
2453 | packet_size = 0; |
2454 | overhead = 0; |
2455 | } else if (packets_transmitted > 0) { |
2456 | /* Otherwise if we do have remaining packets, and we've |
2457 | * scheduled some packets in this interval, take the |
2458 | * largest max packet size from endpoints with this |
2459 | * interval. |
2460 | */ |
2461 | packet_size = largest_mps; |
2462 | overhead = interval_overhead; |
2463 | } |
2464 | /* Otherwise carry over packet_size and overhead from the last |
2465 | * time we had a remainder. |
2466 | */ |
2467 | bw_used += bw_added; |
2468 | if (bw_used > max_bandwidth) { |
			xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
2472 | return -ENOMEM; |
2473 | } |
2474 | } |
2475 | /* |
2476 | * Ok, we know we have some packets left over after even-handedly |
2477 | * scheduling interval 15. We don't know which microframes they will |
2478 | * fit into, so we over-schedule and say they will be scheduled every |
2479 | * microframe. |
2480 | */ |
2481 | if (packets_remaining > 0) |
2482 | bw_used += overhead + packet_size; |
2483 | |
2484 | if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { |
2485 | unsigned int port_index = virt_dev->real_port - 1; |
2486 | |
2487 | /* OK, we're manipulating a HS device attached to a |
2488 | * root port bandwidth domain. Include the number of active TTs |
2489 | * in the bandwidth used. |
2490 | */ |
2491 | bw_used += TT_HS_OVERHEAD * |
2492 | xhci->rh_bw[port_index].num_active_tts; |
2493 | } |
2494 | |
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Final bandwidth: %u, Limit: %u, Reserved: %u, Available: %u percent",
2498 | bw_used, max_bandwidth, bw_reserved, |
2499 | (max_bandwidth - bw_used - bw_reserved) * 100 / |
2500 | max_bandwidth); |
2501 | |
2502 | bw_used += bw_reserved; |
2503 | if (bw_used > max_bandwidth) { |
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2505 | bw_used, max_bandwidth); |
2506 | return -ENOMEM; |
2507 | } |
2508 | |
2509 | bw_table->bw_used = bw_used; |
2510 | return 0; |
2511 | } |
2512 | |
2513 | static bool xhci_is_async_ep(unsigned int ep_type) |
2514 | { |
2515 | return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && |
2516 | ep_type != ISOC_IN_EP && |
2517 | ep_type != INT_IN_EP); |
2518 | } |
2519 | |
2520 | static bool xhci_is_sync_in_ep(unsigned int ep_type) |
2521 | { |
2522 | return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); |
2523 | } |
2524 | |
2525 | static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) |
2526 | { |
2527 | unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); |
2528 | |
2529 | if (ep_bw->ep_interval == 0) |
2530 | return SS_OVERHEAD_BURST + |
2531 | (ep_bw->mult * ep_bw->num_packets * |
2532 | (SS_OVERHEAD + mps)); |
2533 | return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * |
2534 | (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), |
2535 | 1 << ep_bw->ep_interval); |
2536 | |
2537 | } |
2538 | |
2539 | static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, |
2540 | struct xhci_bw_info *ep_bw, |
2541 | struct xhci_interval_bw_table *bw_table, |
2542 | struct usb_device *udev, |
2543 | struct xhci_virt_ep *virt_ep, |
2544 | struct xhci_tt_bw_info *tt_info) |
2545 | { |
2546 | struct xhci_interval_bw *interval_bw; |
2547 | int normalized_interval; |
2548 | |
	if (xhci_is_async_ep(ep_bw->type))
2550 | return; |
2551 | |
2552 | if (udev->speed >= USB_SPEED_SUPER) { |
		if (xhci_is_sync_in_ep(ep_bw->type))
2554 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= |
2555 | xhci_get_ss_bw_consumed(ep_bw); |
2556 | else |
2557 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= |
2558 | xhci_get_ss_bw_consumed(ep_bw); |
2559 | return; |
2560 | } |
2561 | |
2562 | /* SuperSpeed endpoints never get added to intervals in the table, so |
2563 | * this check is only valid for HS/FS/LS devices. |
2564 | */ |
	if (list_empty(&virt_ep->bw_endpoint_list))
2566 | return; |
2567 | /* For LS/FS devices, we need to translate the interval expressed in |
2568 | * microframes to frames. |
2569 | */ |
2570 | if (udev->speed == USB_SPEED_HIGH) |
2571 | normalized_interval = ep_bw->ep_interval; |
2572 | else |
2573 | normalized_interval = ep_bw->ep_interval - 3; |
2574 | |
2575 | if (normalized_interval == 0) |
2576 | bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; |
2577 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
2578 | interval_bw->num_packets -= ep_bw->num_packets; |
2579 | switch (udev->speed) { |
2580 | case USB_SPEED_LOW: |
2581 | interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; |
2582 | break; |
2583 | case USB_SPEED_FULL: |
2584 | interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; |
2585 | break; |
2586 | case USB_SPEED_HIGH: |
2587 | interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; |
2588 | break; |
2589 | default: |
2590 | /* Should never happen because only LS/FS/HS endpoints will get |
2591 | * added to the endpoint list. |
2592 | */ |
2593 | return; |
2594 | } |
2595 | if (tt_info) |
2596 | tt_info->active_eps -= 1; |
	list_del_init(&virt_ep->bw_endpoint_list);
2598 | } |
2599 | |
2600 | static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, |
2601 | struct xhci_bw_info *ep_bw, |
2602 | struct xhci_interval_bw_table *bw_table, |
2603 | struct usb_device *udev, |
2604 | struct xhci_virt_ep *virt_ep, |
2605 | struct xhci_tt_bw_info *tt_info) |
2606 | { |
2607 | struct xhci_interval_bw *interval_bw; |
2608 | struct xhci_virt_ep *smaller_ep; |
2609 | int normalized_interval; |
2610 | |
	if (xhci_is_async_ep(ep_bw->type))
2612 | return; |
2613 | |
	if (udev->speed >= USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
2616 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in += |
2617 | xhci_get_ss_bw_consumed(ep_bw); |
2618 | else |
2619 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out += |
2620 | xhci_get_ss_bw_consumed(ep_bw); |
2621 | return; |
2622 | } |
2623 | |
2624 | /* For LS/FS devices, we need to translate the interval expressed in |
2625 | * microframes to frames. |
2626 | */ |
2627 | if (udev->speed == USB_SPEED_HIGH) |
2628 | normalized_interval = ep_bw->ep_interval; |
2629 | else |
2630 | normalized_interval = ep_bw->ep_interval - 3; |
2631 | |
2632 | if (normalized_interval == 0) |
2633 | bw_table->interval0_esit_payload += ep_bw->max_esit_payload; |
2634 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
2635 | interval_bw->num_packets += ep_bw->num_packets; |
2636 | switch (udev->speed) { |
2637 | case USB_SPEED_LOW: |
2638 | interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; |
2639 | break; |
2640 | case USB_SPEED_FULL: |
2641 | interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; |
2642 | break; |
2643 | case USB_SPEED_HIGH: |
2644 | interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; |
2645 | break; |
2646 | default: |
2647 | /* Should never happen because only LS/FS/HS endpoints will get |
2648 | * added to the endpoint list. |
2649 | */ |
2650 | return; |
2651 | } |
2652 | |
2653 | if (tt_info) |
2654 | tt_info->active_eps += 1; |
2655 | /* Insert the endpoint into the list, largest max packet size first. */ |
2656 | list_for_each_entry(smaller_ep, &interval_bw->endpoints, |
2657 | bw_endpoint_list) { |
2658 | if (ep_bw->max_packet_size >= |
2659 | smaller_ep->bw_info.max_packet_size) { |
2660 | /* Add the new ep before the smaller endpoint */ |
			list_add_tail(&virt_ep->bw_endpoint_list,
				      &smaller_ep->bw_endpoint_list);
2663 | return; |
2664 | } |
2665 | } |
2666 | /* Add the new endpoint at the end of the list. */ |
	list_add_tail(&virt_ep->bw_endpoint_list,
		      &interval_bw->endpoints);
2669 | } |
2670 | |
2671 | void xhci_update_tt_active_eps(struct xhci_hcd *xhci, |
2672 | struct xhci_virt_device *virt_dev, |
2673 | int old_active_eps) |
2674 | { |
	struct xhci_root_port_bw_info *rh_bw_info;

	if (!virt_dev->tt_info)
2677 | return; |
2678 | |
2679 | rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; |
2680 | if (old_active_eps == 0 && |
2681 | virt_dev->tt_info->active_eps != 0) { |
2682 | rh_bw_info->num_active_tts += 1; |
2683 | rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; |
2684 | } else if (old_active_eps != 0 && |
2685 | virt_dev->tt_info->active_eps == 0) { |
2686 | rh_bw_info->num_active_tts -= 1; |
2687 | rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; |
2688 | } |
2689 | } |
2690 | |
2691 | static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, |
2692 | struct xhci_virt_device *virt_dev, |
2693 | struct xhci_container_ctx *in_ctx) |
2694 | { |
2695 | struct xhci_bw_info ep_bw_info[31]; |
2696 | int i; |
2697 | struct xhci_input_control_ctx *ctrl_ctx; |
2698 | int old_active_eps = 0; |
2699 | |
2700 | if (virt_dev->tt_info) |
2701 | old_active_eps = virt_dev->tt_info->active_eps; |
2702 | |
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2706 | __func__); |
2707 | return -ENOMEM; |
2708 | } |
2709 | |
2710 | for (i = 0; i < 31; i++) { |
2711 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
2712 | continue; |
2713 | |
2714 | /* Make a copy of the BW info in case we need to revert this */ |
2715 | memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, |
2716 | sizeof(ep_bw_info[i])); |
2717 | /* Drop the endpoint from the interval table if the endpoint is |
2718 | * being dropped or changed. |
2719 | */ |
2720 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
2727 | } |
2728 | /* Overwrite the information stored in the endpoints' bw_info */ |
	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2730 | for (i = 0; i < 31; i++) { |
2731 | /* Add any changed or added endpoints to the interval table */ |
2732 | if (EP_IS_ADDED(ctrl_ctx, i)) |
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
2739 | } |
2740 | |
2741 | if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { |
2742 | /* Ok, this fits in the bandwidth we have. |
2743 | * Update the number of active TTs. |
2744 | */ |
2745 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); |
2746 | return 0; |
2747 | } |
2748 | |
2749 | /* We don't have enough bandwidth for this, revert the stored info. */ |
2750 | for (i = 0; i < 31; i++) { |
2751 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
2752 | continue; |
2753 | |
2754 | /* Drop the new copies of any added or changed endpoints from |
2755 | * the interval table. |
2756 | */ |
2757 | if (EP_IS_ADDED(ctrl_ctx, i)) { |
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
2764 | } |
2765 | /* Revert the endpoint back to its old information */ |
2766 | memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], |
2767 | sizeof(ep_bw_info[i])); |
2768 | /* Add any changed or dropped endpoints back into the table */ |
2769 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
2776 | } |
2777 | return -ENOMEM; |
2778 | } |
2779 | |
2780 | |
2781 | /* Issue a configure endpoint command or evaluate context command |
2782 | * and wait for it to finish. |
2783 | */ |
2784 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, |
2785 | struct usb_device *udev, |
2786 | struct xhci_command *command, |
2787 | bool ctx_change, bool must_succeed) |
2788 | { |
2789 | int ret; |
2790 | unsigned long flags; |
2791 | struct xhci_input_control_ctx *ctrl_ctx; |
2792 | struct xhci_virt_device *virt_dev; |
2793 | struct xhci_slot_ctx *slot_ctx; |
2794 | |
2795 | if (!command) |
2796 | return -EINVAL; |
2797 | |
2798 | spin_lock_irqsave(&xhci->lock, flags); |
2799 | |
2800 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
		spin_unlock_irqrestore(&xhci->lock, flags);
2802 | return -ESHUTDOWN; |
2803 | } |
2804 | |
2805 | virt_dev = xhci->devs[udev->slot_id]; |
2806 | |
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2808 | if (!ctrl_ctx) { |
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2811 | __func__); |
2812 | return -ENOMEM; |
2813 | } |
2814 | |
2815 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && |
2816 | xhci_reserve_host_resources(xhci, ctrl_ctx)) { |
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, active endpoint contexts = %u\n",
			  xhci->num_active_eps);
2821 | return -ENOMEM; |
2822 | } |
2823 | if ((xhci->quirks & XHCI_SW_BW_CHECKING) && |
	    xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2825 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
2826 | xhci_free_host_resources(xhci, ctrl_ctx); |
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough bandwidth\n");
2829 | return -ENOMEM; |
2830 | } |
2831 | |
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2833 | |
2834 | trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); |
	trace_xhci_configure_endpoint(slot_ctx);
2836 | |
2837 | if (!ctx_change) |
		ret = xhci_queue_configure_endpoint(xhci, command,
				command->in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, command,
				command->in_ctx->dma,
				udev->slot_id, must_succeed);
2845 | if (ret < 0) { |
2846 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
2847 | xhci_free_host_resources(xhci, ctrl_ctx); |
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"FIXME allocate a new ring segment");
2851 | return -ENOMEM; |
2852 | } |
2853 | xhci_ring_cmd_db(xhci); |
	spin_unlock_irqrestore(&xhci->lock, flags);
2855 | |
2856 | /* Wait for the configure endpoint command to complete */ |
2857 | wait_for_completion(command->completion); |
2858 | |
2859 | if (!ctx_change) |
		ret = xhci_configure_endpoint_result(xhci, udev,
				&command->status);
	else
		ret = xhci_evaluate_context_result(xhci, udev,
				&command->status);
2865 | |
2866 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
2867 | spin_lock_irqsave(&xhci->lock, flags); |
2868 | /* If the command failed, remove the reserved resources. |
2869 | * Otherwise, clean up the estimate to include dropped eps. |
2870 | */ |
2871 | if (ret) |
2872 | xhci_free_host_resources(xhci, ctrl_ctx); |
2873 | else |
2874 | xhci_finish_resource_reservation(xhci, ctrl_ctx); |
		spin_unlock_irqrestore(&xhci->lock, flags);
2876 | } |
2877 | return ret; |
2878 | } |
2879 | |
2880 | static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, |
2881 | struct xhci_virt_device *vdev, int i) |
2882 | { |
2883 | struct xhci_virt_ep *ep = &vdev->eps[i]; |
2884 | |
2885 | if (ep->ep_state & EP_HAS_STREAMS) { |
		xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
			  xhci_get_endpoint_address(i));
		xhci_free_stream_info(xhci, ep->stream_info);
2889 | ep->stream_info = NULL; |
2890 | ep->ep_state &= ~EP_HAS_STREAMS; |
2891 | } |
2892 | } |
2893 | |
2894 | /* Called after one or more calls to xhci_add_endpoint() or |
2895 | * xhci_drop_endpoint(). If this call fails, the USB core is expected |
2896 | * to call xhci_reset_bandwidth(). |
2897 | * |
2898 | * Since we are in the middle of changing either configuration or |
2899 | * installing a new alt setting, the USB core won't allow URBs to be |
2900 | * enqueued for any endpoint on the old config or interface. Nothing |
2901 | * else should be touching the xhci->devs[slot_id] structure, so we |
2902 | * don't need to take the xhci->lock for manipulating that. |
2903 | */ |
2904 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
2905 | { |
2906 | int i; |
2907 | int ret = 0; |
2908 | struct xhci_hcd *xhci; |
2909 | struct xhci_virt_device *virt_dev; |
2910 | struct xhci_input_control_ctx *ctrl_ctx; |
2911 | struct xhci_slot_ctx *slot_ctx; |
2912 | struct xhci_command *command; |
2913 | |
	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2915 | if (ret <= 0) |
2916 | return ret; |
2917 | xhci = hcd_to_xhci(hcd); |
2918 | if ((xhci->xhc_state & XHCI_STATE_DYING) || |
2919 | (xhci->xhc_state & XHCI_STATE_REMOVING)) |
2920 | return -ENODEV; |
2921 | |
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2923 | virt_dev = xhci->devs[udev->slot_id]; |
2924 | |
	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2926 | if (!command) |
2927 | return -ENOMEM; |
2928 | |
2929 | command->in_ctx = virt_dev->in_ctx; |
2930 | |
2931 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ |
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2935 | __func__); |
2936 | ret = -ENOMEM; |
2937 | goto command_cleanup; |
2938 | } |
2939 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
2940 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); |
2941 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); |
2942 | |
2943 | /* Don't issue the command if there's no endpoints to update. */ |
2944 | if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && |
2945 | ctrl_ctx->drop_flags == 0) { |
2946 | ret = 0; |
2947 | goto command_cleanup; |
2948 | } |
2949 | /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ |
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2951 | for (i = 31; i >= 1; i--) { |
2952 | __le32 le32 = cpu_to_le32(BIT(i)); |
2953 | |
2954 | if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) |
2955 | || (ctrl_ctx->add_flags & le32) || i == 1) { |
2956 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
2957 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); |
2958 | break; |
2959 | } |
2960 | } |
2961 | |
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, false);
2964 | if (ret) |
		/* Caller should call reset_bandwidth() */
2966 | goto command_cleanup; |
2967 | |
2968 | /* Free any rings that were dropped, but not changed. */ |
2969 | for (i = 1; i < 31; i++) { |
2970 | if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && |
2971 | !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { |
			xhci_free_endpoint_ring(xhci, virt_dev, i);
			xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2974 | } |
2975 | } |
2976 | xhci_zero_in_ctx(xhci, virt_dev); |
2977 | /* |
2978 | * Install any rings for completely new endpoints or changed endpoints, |
2979 | * and free any old rings from changed endpoints. |
2980 | */ |
2981 | for (i = 1; i < 31; i++) { |
2982 | if (!virt_dev->eps[i].new_ring) |
2983 | continue; |
2984 | /* Only free the old ring if it exists. |
2985 | * It may not if this is the first add of an endpoint. |
2986 | */ |
2987 | if (virt_dev->eps[i].ring) { |
2988 | xhci_free_endpoint_ring(xhci, virt_dev, i); |
2989 | } |
2990 | xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); |
2991 | virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; |
2992 | virt_dev->eps[i].new_ring = NULL; |
2993 | xhci_debugfs_create_endpoint(xhci, virt_dev, i); |
2994 | } |
2995 | command_cleanup: |
2996 | kfree(command->completion); |
2997 | kfree(command); |
2998 | |
2999 | return ret; |
3000 | } |
3001 | EXPORT_SYMBOL_GPL(xhci_check_bandwidth); |
3002 | |
3003 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
3004 | { |
3005 | struct xhci_hcd *xhci; |
3006 | struct xhci_virt_device *virt_dev; |
3007 | int i, ret; |
3008 | |
3009 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
3010 | if (ret <= 0) |
3011 | return; |
3012 | xhci = hcd_to_xhci(hcd); |
3013 | |
3014 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
3015 | virt_dev = xhci->devs[udev->slot_id]; |
3016 | /* Free any rings allocated for added endpoints */ |
3017 | for (i = 0; i < 31; i++) { |
3018 | if (virt_dev->eps[i].new_ring) { |
3019 | xhci_debugfs_remove_endpoint(xhci, virt_dev, i); |
3020 | xhci_ring_free(xhci, virt_dev->eps[i].new_ring); |
3021 | virt_dev->eps[i].new_ring = NULL; |
3022 | } |
3023 | } |
3024 | xhci_zero_in_ctx(xhci, virt_dev); |
3025 | } |
3026 | EXPORT_SYMBOL_GPL(xhci_reset_bandwidth); |
3027 | |
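/*
 * Program an input control context for a Configure Endpoint command:
 * set the endpoint add/drop flag bitmasks, copy the current slot
 * context from the output context, and mark the slot context itself
 * as added (A0 = 1), as section 4.6.6 requires.
 */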
3028 | static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, |
3029 | struct xhci_container_ctx *in_ctx, |
3030 | struct xhci_container_ctx *out_ctx, |
3031 | struct xhci_input_control_ctx *ctrl_ctx, |
3032 | u32 add_flags, u32 drop_flags) |
3033 | { |
3034 | ctrl_ctx->add_flags = cpu_to_le32(add_flags); |
3035 | ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); |
3036 | xhci_slot_copy(xhci, in_ctx, out_ctx); |
3037 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
3038 | } |
3039 | |
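/*
 * Called by the USB core when an endpoint is released. Wait for any
 * pending hub TT clearing work to finish, log any leftover ep_state
 * flags, and clear the host_ep->hcpriv backpointer under the lock so
 * nothing else can reach a stale udev through it.
 */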
3040 | static void xhci_endpoint_disable(struct usb_hcd *hcd, |
3041 | struct usb_host_endpoint *host_ep) |
3042 | { |
3043 | struct xhci_hcd *xhci; |
3044 | struct xhci_virt_device *vdev; |
3045 | struct xhci_virt_ep *ep; |
3046 | struct usb_device *udev; |
3047 | unsigned long flags; |
3048 | unsigned int ep_index; |
3049 | |
3050 | xhci = hcd_to_xhci(hcd); |
3051 | rescan: |
3052 | spin_lock_irqsave(&xhci->lock, flags); |
3053 | |
3054 | udev = (struct usb_device *)host_ep->hcpriv; |
3055 | if (!udev || !udev->slot_id) |
3056 | goto done; |
3057 | |
3058 | vdev = xhci->devs[udev->slot_id]; |
3059 | if (!vdev) |
3060 | goto done; |
3061 | |
3062 | ep_index = xhci_get_endpoint_index(&host_ep->desc); |
3063 | ep = &vdev->eps[ep_index]; |
3064 | |
3065 | /* wait for hub_tt_work to finish clearing hub TT */ |
3066 | if (ep->ep_state & EP_CLEARING_TT) { |
3067 | spin_unlock_irqrestore(&xhci->lock, flags); |
3068 | schedule_timeout_uninterruptible(1); |
3069 | goto rescan; |
3070 | } |
3071 | |
3072 | if (ep->ep_state) |
3073 | xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", |
3074 | ep->ep_state); |
3075 | done: |
3076 | host_ep->hcpriv = NULL; |
3077 | spin_unlock_irqrestore(&xhci->lock, flags); |
3078 | } |
3079 | |
3080 | /* |
3081 | * Called after usb core issues a clear halt control message. |
3082 | * The host side of the halt should already be cleared by a reset endpoint |
3083 | * command issued when the STALL event was received. |
3084 | * |
3085 | * The reset endpoint command may only be issued to endpoints in the halted |
3086 | * state. For software that wishes to reset the data toggle or sequence number |
3087 | * of an endpoint that isn't in the halted state, this function will issue a |
3088 | * configure endpoint command with the Drop and Add bits set for the target |
3089 | * endpoint. Refer to the additional note in xHCI specification section 4.6.8. |
3090 | */ |
3091 | |
3092 | static void xhci_endpoint_reset(struct usb_hcd *hcd, |
3093 | struct usb_host_endpoint *host_ep) |
3094 | { |
3095 | struct xhci_hcd *xhci; |
3096 | struct usb_device *udev; |
3097 | struct xhci_virt_device *vdev; |
3098 | struct xhci_virt_ep *ep; |
3099 | struct xhci_input_control_ctx *ctrl_ctx; |
3100 | struct xhci_command *stop_cmd, *cfg_cmd; |
3101 | unsigned int ep_index; |
3102 | unsigned long flags; |
3103 | u32 ep_flag; |
3104 | int err; |
3105 | |
3106 | xhci = hcd_to_xhci(hcd); |
3107 | if (!host_ep->hcpriv) |
3108 | return; |
3109 | udev = (struct usb_device *) host_ep->hcpriv; |
3110 | vdev = xhci->devs[udev->slot_id]; |
3111 | |
3112 | /* |
3113 | * vdev may be lost due to an xHC restore error and re-initialization |
3114 | * during S3/S4 resume. A new vdev will be allocated later by |
3115 | * xhci_discover_or_reset_device() |
3116 | */ |
3117 | if (!udev->slot_id || !vdev) |
3118 | return; |
3119 | ep_index = xhci_get_endpoint_index(&host_ep->desc); |
3120 | ep = &vdev->eps[ep_index]; |
3121 | |
3122 | /* Bail out if the toggle is already being cleared by an endpoint reset */ |
3123 | spin_lock_irqsave(&xhci->lock, flags); |
3124 | if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) { |
3125 | ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE; |
3126 | spin_unlock_irqrestore(&xhci->lock, flags); |
3127 | return; |
3128 | } |
3129 | spin_unlock_irqrestore(&xhci->lock, flags); |
3130 | /* Only interrupt and bulk endpoints use data toggle; see USB 2.0 spec section 5.5.4 */ |
3131 | if (usb_endpoint_xfer_control(&host_ep->desc) || |
3132 | usb_endpoint_xfer_isoc(&host_ep->desc)) |
3133 | return; |
3134 | |
3135 | ep_flag = xhci_get_endpoint_flag(&host_ep->desc); |
3136 | |
3137 | if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG) |
3138 | return; |
3139 | |
3140 | stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT); |
3141 | if (!stop_cmd) |
3142 | return; |
3143 | |
3144 | cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT); |
3145 | if (!cfg_cmd) |
3146 | goto cleanup; |
3147 | |
3148 | spin_lock_irqsave(&xhci->lock, flags); |
3149 | |
3150 | /* block queuing new trbs and ringing ep doorbell */ |
3151 | ep->ep_state |= EP_SOFT_CLEAR_TOGGLE; |
3152 | |
3153 | /* |
3154 | * Make sure endpoint ring is empty before resetting the toggle/seq. |
3155 | * The driver is required to synchronously cancel all transfer requests. |
3156 | * Stop the endpoint to force the xHC to update the output context. |
3157 | */ |
3158 | |
3159 | if (!list_empty(&ep->ring->td_list)) { |
3160 | dev_err(&udev->dev, "EP not empty, refuse reset\n"); |
3161 | spin_unlock_irqrestore(&xhci->lock, flags); |
3162 | xhci_free_command(xhci, cfg_cmd); |
3163 | goto cleanup; |
3164 | } |
3165 | |
3166 | err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, |
3167 | ep_index, 0); |
3168 | if (err < 0) { |
3169 | spin_unlock_irqrestore(&xhci->lock, flags); |
3170 | xhci_free_command(xhci, cfg_cmd); |
3171 | xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d\n", |
3172 | __func__, err); |
3173 | goto cleanup; |
3174 | } |
3175 | |
3176 | xhci_ring_cmd_db(xhci); |
3177 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3178 | |
3179 | wait_for_completion(stop_cmd->completion); |
3180 | |
3181 | spin_lock_irqsave(&xhci->lock, flags); |
3182 | |
3183 | /* config ep command clears toggle if add and drop ep flags are set */ |
3184 | ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx); |
3185 | if (!ctrl_ctx) { |
3186 | spin_unlock_irqrestore(&xhci->lock, flags); |
3187 | xhci_free_command(xhci, cfg_cmd); |
3188 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
3189 | __func__); |
3190 | goto cleanup; |
3191 | } |
3192 | |
3193 | xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx, |
3194 | ctrl_ctx, ep_flag, ep_flag); |
3195 | xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); |
3196 | |
3197 | err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, |
3198 | udev->slot_id, false); |
3199 | if (err < 0) { |
3200 | spin_unlock_irqrestore(&xhci->lock, flags); |
3201 | xhci_free_command(xhci, cfg_cmd); |
3202 | xhci_dbg(xhci, "%s: Failed to queue config ep command, %d\n", |
3203 | __func__, err); |
3204 | goto cleanup; |
3205 | } |
3206 | |
3207 | xhci_ring_cmd_db(xhci); |
3208 | spin_unlock_irqrestore(&xhci->lock, flags); |
3209 | |
3210 | wait_for_completion(cfg_cmd->completion); |
3211 | |
3212 | xhci_free_command(xhci, cfg_cmd); |
3213 | cleanup: |
3214 | xhci_free_command(xhci, stop_cmd); |
3215 | spin_lock_irqsave(&xhci->lock, flags); |
3216 | if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) |
3217 | ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; |
3218 | spin_unlock_irqrestore(&xhci->lock, flags); |
3219 | } |
3220 | |
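/*
 * Validate one endpoint before stream allocation: it must be a
 * SuperSpeed endpoint whose companion descriptor advertises stream
 * support, it must not already have (or be in the middle of getting)
 * streams, and no URBs may be pending on its ring.
 */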
3221 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
3222 | struct usb_device *udev, struct usb_host_endpoint *ep, |
3223 | unsigned int slot_id) |
3224 | { |
3225 | int ret; |
3226 | unsigned int ep_index; |
3227 | unsigned int ep_state; |
3228 | |
3229 | if (!ep) |
3230 | return -EINVAL; |
3231 | ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); |
3232 | if (ret <= 0) |
3233 | return ret ? ret : -EINVAL; |
3234 | if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { |
3235 | xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" |
3236 | " descriptor for ep 0x%x does not support streams\n", |
3237 | ep->desc.bEndpointAddress); |
3238 | return -EINVAL; |
3239 | } |
3240 | |
3241 | ep_index = xhci_get_endpoint_index(&ep->desc); |
3242 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; |
3243 | if (ep_state & EP_HAS_STREAMS || |
3244 | ep_state & EP_GETTING_STREAMS) { |
3245 | xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " |
3246 | "already has streams set up.\n", |
3247 | ep->desc.bEndpointAddress); |
3248 | xhci_warn(xhci, "Send email to xHCI maintainer and ask for " |
3249 | "dynamic stream context array reallocation.\n"); |
3250 | return -EINVAL; |
3251 | } |
3252 | if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { |
3253 | xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " |
3254 | "endpoint 0x%x; URBs are pending.\n", |
3255 | ep->desc.bEndpointAddress); |
3256 | return -EINVAL; |
3257 | } |
3258 | return 0; |
3259 | } |
3260 | |
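/*
 * Round the requested number of streams up to the next power of two
 * (the stream context array size the xHC requires) and clamp both
 * values to the controller's MaxPSA limit. For example, a request for
 * 6 stream IDs needs roundup_pow_of_two(6) = 8 stream context entries.
 */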
3261 | static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, |
3262 | unsigned int *num_streams, unsigned int *num_stream_ctxs) |
3263 | { |
3264 | unsigned int max_streams; |
3265 | |
3266 | /* The stream context array size must be a power of two */ |
3267 | *num_stream_ctxs = roundup_pow_of_two(*num_streams); |
3268 | /* |
3269 | * Find out how many primary stream array entries the host controller |
3270 | * supports. Later we may use secondary stream arrays (similar to 2nd |
3271 | * level page entries), but that's an optional feature for xHCI host |
3272 | * controllers. xHCs must support at least 4 stream IDs. |
3273 | */ |
3274 | max_streams = HCC_MAX_PSA(xhci->hcc_params); |
3275 | if (*num_stream_ctxs > max_streams) { |
3276 | xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", |
3277 | max_streams); |
3278 | *num_stream_ctxs = max_streams; |
3279 | *num_streams = max_streams; |
3280 | } |
3281 | } |
3282 | |
3283 | /* Returns an error code if one of the endpoints already has streams. |
3284 | * This does not change any data structures, it only checks and gathers |
3285 | * information. |
3286 | */ |
3287 | static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, |
3288 | struct usb_device *udev, |
3289 | struct usb_host_endpoint **eps, unsigned int num_eps, |
3290 | unsigned int *num_streams, u32 *changed_ep_bitmask) |
3291 | { |
3292 | unsigned int max_streams; |
3293 | unsigned int endpoint_flag; |
3294 | int i; |
3295 | int ret; |
3296 | |
3297 | for (i = 0; i < num_eps; i++) { |
3298 | ret = xhci_check_streams_endpoint(xhci, udev, |
3299 | eps[i], udev->slot_id); |
3300 | if (ret < 0) |
3301 | return ret; |
3302 | |
3303 | max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); |
3304 | if (max_streams < (*num_streams - 1)) { |
3305 | xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", |
3306 | eps[i]->desc.bEndpointAddress, |
3307 | max_streams); |
3308 | *num_streams = max_streams + 1; |
3309 | } |
3310 | |
3311 | endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); |
3312 | if (*changed_ep_bitmask & endpoint_flag) |
3313 | return -EINVAL; |
3314 | *changed_ep_bitmask |= endpoint_flag; |
3315 | } |
3316 | return 0; |
3317 | } |
3318 | |
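/*
 * Compute the bitmask of endpoint add/drop flags for the endpoints
 * that actually have streams to free. Returns 0 (nothing to do) when
 * the slot is gone, streams are already disabled, or another call is
 * disabling them.
 */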
3319 | static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, |
3320 | struct usb_device *udev, |
3321 | struct usb_host_endpoint **eps, unsigned int num_eps) |
3322 | { |
3323 | u32 changed_ep_bitmask = 0; |
3324 | unsigned int slot_id; |
3325 | unsigned int ep_index; |
3326 | unsigned int ep_state; |
3327 | int i; |
3328 | |
3329 | slot_id = udev->slot_id; |
3330 | if (!xhci->devs[slot_id]) |
3331 | return 0; |
3332 | |
3333 | for (i = 0; i < num_eps; i++) { |
3334 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3335 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; |
3336 | /* Are streams already being freed for the endpoint? */ |
3337 | if (ep_state & EP_GETTING_NO_STREAMS) { |
3338 | xhci_warn(xhci, "WARN Can't disable streams for " |
3339 | "endpoint 0x%x, " |
3340 | "streams are being disabled already\n", |
3341 | eps[i]->desc.bEndpointAddress); |
3342 | return 0; |
3343 | } |
3344 | /* Are there actually any streams to free? */ |
3345 | if (!(ep_state & EP_HAS_STREAMS) && |
3346 | !(ep_state & EP_GETTING_STREAMS)) { |
3347 | xhci_warn(xhci, "WARN Can't disable streams for " |
3348 | "endpoint 0x%x, " |
3349 | "streams are already disabled!\n", |
3350 | eps[i]->desc.bEndpointAddress); |
3351 | xhci_warn(xhci, "WARN xhci_free_streams() called " |
3352 | "with non-streams endpoint\n"); |
3353 | return 0; |
3354 | } |
3355 | changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); |
3356 | } |
3357 | return changed_ep_bitmask; |
3358 | } |
3359 | |
3360 | /* |
3361 | * The USB device drivers use this function (through the HCD interface in USB |
3362 | * core) to prepare a set of bulk endpoints to use streams. Streams are used to |
3363 | * coordinate mass storage command queueing across multiple endpoints (basically |
3364 | * a stream ID == a task ID). |
3365 | * |
3366 | * Setting up streams involves allocating the same size stream context array |
3367 | * for each endpoint and issuing a configure endpoint command for all endpoints. |
3368 | * |
3369 | * Don't allow the call to succeed if one endpoint only supports one stream |
3370 | * (which means it doesn't support streams at all). |
3371 | * |
3372 | * Drivers may get less stream IDs than they asked for, if the host controller |
3373 | * hardware or endpoints claim they can't support the number of requested |
3374 | * stream IDs. |
3375 | */ |
3376 | static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, |
3377 | struct usb_host_endpoint **eps, unsigned int num_eps, |
3378 | unsigned int num_streams, gfp_t mem_flags) |
3379 | { |
3380 | int i, ret; |
3381 | struct xhci_hcd *xhci; |
3382 | struct xhci_virt_device *vdev; |
3383 | struct xhci_command *config_cmd; |
3384 | struct xhci_input_control_ctx *ctrl_ctx; |
3385 | unsigned int ep_index; |
3386 | unsigned int num_stream_ctxs; |
3387 | unsigned int max_packet; |
3388 | unsigned long flags; |
3389 | u32 changed_ep_bitmask = 0; |
3390 | |
3391 | if (!eps) |
3392 | return -EINVAL; |
3393 | |
3394 | /* Add one to the number of streams requested to account for |
3395 | * stream 0 that is reserved for xHCI usage. |
3396 | */ |
3397 | num_streams += 1; |
3398 | xhci = hcd_to_xhci(hcd); |
3399 | xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", |
3400 | num_streams); |
3401 | |
3402 | /* MaxPSASize value 0 (2 streams) means streams are not supported */ |
3403 | if ((xhci->quirks & XHCI_BROKEN_STREAMS) || |
3404 | HCC_MAX_PSA(xhci->hcc_params) < 4) { |
3405 | xhci_dbg(xhci, "xHCI controller does not support streams.\n"); |
3406 | return -ENOSYS; |
3407 | } |
3408 | |
3409 | config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); |
3410 | if (!config_cmd) |
3411 | return -ENOMEM; |
3412 | |
3413 | ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); |
3414 | if (!ctrl_ctx) { |
3415 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
3416 | __func__); |
3417 | xhci_free_command(xhci, config_cmd); |
3418 | return -ENOMEM; |
3419 | } |
3420 | |
3421 | /* Check to make sure all endpoints are not already configured for |
3422 | * streams. While we're at it, find the maximum number of streams that |
3423 | * all the endpoints will support and check for duplicate endpoints. |
3424 | */ |
3425 | spin_lock_irqsave(&xhci->lock, flags); |
3426 | ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, |
3427 | num_eps, &num_streams, &changed_ep_bitmask); |
3428 | if (ret < 0) { |
3429 | xhci_free_command(xhci, config_cmd); |
3430 | spin_unlock_irqrestore(&xhci->lock, flags); |
3431 | return ret; |
3432 | } |
3433 | if (num_streams <= 1) { |
3434 | xhci_warn(xhci, "WARN: endpoints can't handle " |
3435 | "more than one stream.\n"); |
3436 | xhci_free_command(xhci, config_cmd); |
3437 | spin_unlock_irqrestore(&xhci->lock, flags); |
3438 | return -EINVAL; |
3439 | } |
3440 | vdev = xhci->devs[udev->slot_id]; |
3441 | /* Mark each endpoint as being in transition, so |
3442 | * xhci_urb_enqueue() will reject all URBs. |
3443 | */ |
3444 | for (i = 0; i < num_eps; i++) { |
3445 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3446 | vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; |
3447 | } |
3448 | spin_unlock_irqrestore(&xhci->lock, flags); |
3449 | |
3450 | /* Setup internal data structures and allocate HW data structures for |
3451 | * streams (but don't install the HW structures in the input context |
3452 | * until we're sure all memory allocation succeeded). |
3453 | */ |
3454 | xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); |
3455 | xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", |
3456 | num_stream_ctxs, num_streams); |
3457 | |
3458 | for (i = 0; i < num_eps; i++) { |
3459 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3460 | max_packet = usb_endpoint_maxp(&eps[i]->desc); |
3461 | vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, |
3462 | num_stream_ctxs, |
3463 | num_streams, |
3464 | max_packet, mem_flags); |
3465 | if (!vdev->eps[ep_index].stream_info) |
3466 | goto cleanup; |
3467 | /* Set maxPstreams in endpoint context and update deq ptr to |
3468 | * point to stream context array. FIXME |
3469 | */ |
3470 | } |
3471 | |
3472 | /* Set up the input context for a configure endpoint command. */ |
3473 | for (i = 0; i < num_eps; i++) { |
3474 | struct xhci_ep_ctx *ep_ctx; |
3475 | |
3476 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3477 | ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); |
3478 | |
3479 | xhci_endpoint_copy(xhci, config_cmd->in_ctx, |
3480 | vdev->out_ctx, ep_index); |
3481 | xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, |
3482 | vdev->eps[ep_index].stream_info); |
3483 | } |
3484 | /* Tell the HW to drop its old copy of the endpoint context info |
3485 | * and add the updated copy from the input context. |
3486 | */ |
3487 | xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, |
3488 | vdev->out_ctx, ctrl_ctx, |
3489 | changed_ep_bitmask, changed_ep_bitmask); |
3490 | |
3491 | /* Issue and wait for the configure endpoint command */ |
3492 | ret = xhci_configure_endpoint(xhci, udev, config_cmd, |
3493 | false, false); |
3494 | |
3495 | /* xHC rejected the configure endpoint command for some reason, so we |
3496 | * leave the old ring intact and free our internal streams data |
3497 | * structure. |
3498 | */ |
3499 | if (ret < 0) |
3500 | goto cleanup; |
3501 | |
3502 | spin_lock_irqsave(&xhci->lock, flags); |
3503 | for (i = 0; i < num_eps; i++) { |
3504 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3505 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; |
3506 | xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", |
3507 | udev->slot_id, ep_index); |
3508 | vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; |
3509 | } |
3510 | xhci_free_command(xhci, config_cmd); |
3511 | spin_unlock_irqrestore(&xhci->lock, flags); |
3512 | |
3513 | for (i = 0; i < num_eps; i++) { |
3514 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3515 | xhci_debugfs_create_stream_files(xhci, vdev, ep_index); |
3516 | } |
3517 | /* Subtract 1 for stream 0, which drivers can't use */ |
3518 | return num_streams - 1; |
3519 | |
3520 | cleanup: |
3521 | /* If it didn't work, free the streams! */ |
3522 | for (i = 0; i < num_eps; i++) { |
3523 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3524 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); |
3525 | vdev->eps[ep_index].stream_info = NULL; |
3526 | /* FIXME Unset maxPstreams in endpoint context and |
3527 | * update deq ptr to point to the normal endpoint ring. |
3528 | */ |
3529 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; |
3530 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; |
3531 | xhci_endpoint_zero(xhci, vdev, eps[i]); |
3532 | } |
3533 | xhci_free_command(xhci, config_cmd); |
3534 | return -ENOMEM; |
3535 | } |
3536 | |
3537 | /* Transition the endpoint from using streams to being a "normal" endpoint |
3538 | * without streams. |
3539 | * |
3540 | * Modify the endpoint context state, submit a configure endpoint command, |
3541 | * and free all endpoint rings for streams if that completes successfully. |
3542 | */ |
3543 | static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, |
3544 | struct usb_host_endpoint **eps, unsigned int num_eps, |
3545 | gfp_t mem_flags) |
3546 | { |
3547 | int i, ret; |
3548 | struct xhci_hcd *xhci; |
3549 | struct xhci_virt_device *vdev; |
3550 | struct xhci_command *command; |
3551 | struct xhci_input_control_ctx *ctrl_ctx; |
3552 | unsigned int ep_index; |
3553 | unsigned long flags; |
3554 | u32 changed_ep_bitmask; |
3555 | |
3556 | xhci = hcd_to_xhci(hcd); |
3557 | vdev = xhci->devs[udev->slot_id]; |
3558 | |
3559 | /* Set up a configure endpoint command to remove the streams rings */ |
3560 | spin_lock_irqsave(&xhci->lock, flags); |
3561 | changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, |
3562 | udev, eps, num_eps); |
3563 | if (changed_ep_bitmask == 0) { |
3564 | spin_unlock_irqrestore(&xhci->lock, flags); |
3565 | return -EINVAL; |
3566 | } |
3567 | |
3568 | /* Use the xhci_command structure from the first endpoint. We may have |
3569 | * allocated too many, but the driver may call xhci_free_streams() for |
3570 | * each endpoint it grouped into one call to xhci_alloc_streams(). |
3571 | */ |
3572 | ep_index = xhci_get_endpoint_index(&eps[0]->desc); |
3573 | command = vdev->eps[ep_index].stream_info->free_streams_command; |
3574 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
3575 | if (!ctrl_ctx) { |
3576 | spin_unlock_irqrestore(&xhci->lock, flags); |
3577 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
3578 | __func__); |
3579 | return -EINVAL; |
3580 | } |
3581 | |
3582 | for (i = 0; i < num_eps; i++) { |
3583 | struct xhci_ep_ctx *ep_ctx; |
3584 | |
3585 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3586 | ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); |
3587 | xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= |
3588 | EP_GETTING_NO_STREAMS; |
3589 | |
3590 | xhci_endpoint_copy(xhci, command->in_ctx, |
3591 | vdev->out_ctx, ep_index); |
3592 | xhci_setup_no_streams_ep_input_ctx(ep_ctx, |
3593 | &vdev->eps[ep_index]); |
3594 | } |
3595 | xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, |
3596 | vdev->out_ctx, ctrl_ctx, |
3597 | changed_ep_bitmask, changed_ep_bitmask); |
3598 | spin_unlock_irqrestore(&xhci->lock, flags); |
3599 | |
3600 | /* Issue and wait for the configure endpoint command, |
3601 | * which must succeed. |
3602 | */ |
3603 | ret = xhci_configure_endpoint(xhci, udev, command, |
3604 | false, true); |
3605 | |
3606 | /* xHC rejected the configure endpoint command for some reason, so we |
3607 | * leave the streams rings intact. |
3608 | */ |
3609 | if (ret < 0) |
3610 | return ret; |
3611 | |
3612 | spin_lock_irqsave(&xhci->lock, flags); |
3613 | for (i = 0; i < num_eps; i++) { |
3614 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
3615 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); |
3616 | vdev->eps[ep_index].stream_info = NULL; |
3617 | /* FIXME Unset maxPstreams in endpoint context and |
3618 | * update deq ptr to point to the normal endpoint ring. |
3619 | */ |
3620 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; |
3621 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; |
3622 | } |
3623 | spin_unlock_irqrestore(&xhci->lock, flags); |
3624 | |
3625 | return 0; |
3626 | } |
3627 | |
3628 | /* |
3629 | * Deletes endpoint resources for endpoints that were active before a Reset |
3630 | * Device command, or a Disable Slot command. The Reset Device command leaves |
3631 | * the control endpoint intact, whereas the Disable Slot command deletes it. |
3632 | * |
3633 | * Must be called with xhci->lock held. |
3634 | */ |
3635 | void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, |
3636 | struct xhci_virt_device *virt_dev, bool drop_control_ep) |
3637 | { |
3638 | int i; |
3639 | unsigned int num_dropped_eps = 0; |
3640 | unsigned int drop_flags = 0; |
3641 | |
3642 | for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { |
3643 | if (virt_dev->eps[i].ring) { |
3644 | drop_flags |= 1 << i; |
3645 | num_dropped_eps++; |
3646 | } |
3647 | } |
3648 | xhci->num_active_eps -= num_dropped_eps; |
3649 | if (num_dropped_eps) |
3650 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
3651 | "Dropped %u ep ctxs, flags = 0x%x, " |
3652 | "%u now active.", |
3653 | num_dropped_eps, drop_flags, |
3654 | xhci->num_active_eps); |
3655 | } |
3656 | |
3657 | /* |
3658 | * This submits a Reset Device Command, which will set the device state to 0, |
3659 | * set the device address to 0, and disable all the endpoints except the default |
3660 | * control endpoint. The USB core should come back and call |
3661 | * xhci_address_device(), and then re-set up the configuration. If this is |
3662 | * called because of a usb_reset_and_verify_device(), then the old alternate |
3663 | * settings will be re-installed through the normal bandwidth allocation |
3664 | * functions. |
3665 | * |
3666 | * Wait for the Reset Device command to finish. Remove all structures |
3667 | * associated with the endpoints that were disabled. Clear the input device |
3668 | * structure? Reset the control endpoint 0 max packet size? |
3669 | * |
3670 | * If the virt_dev to be reset does not exist or does not match the udev, |
3671 | * it means the device is lost, possibly due to an xHC restore error and |
3672 | * re-initialization during S3/S4 resume. In this case, call xhci_alloc_dev() to |
3673 | * re-allocate the device. |
3674 | */ |
3675 | static int xhci_discover_or_reset_device(struct usb_hcd *hcd, |
3676 | struct usb_device *udev) |
3677 | { |
3678 | int ret, i; |
3679 | unsigned long flags; |
3680 | struct xhci_hcd *xhci; |
3681 | unsigned int slot_id; |
3682 | struct xhci_virt_device *virt_dev; |
3683 | struct xhci_command *reset_device_cmd; |
3684 | struct xhci_slot_ctx *slot_ctx; |
3685 | int old_active_eps = 0; |
3686 | |
3687 | ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); |
3688 | if (ret <= 0) |
3689 | return ret; |
3690 | xhci = hcd_to_xhci(hcd); |
3691 | slot_id = udev->slot_id; |
3692 | virt_dev = xhci->devs[slot_id]; |
3693 | if (!virt_dev) { |
3694 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " |
3695 | "not exist. Re-allocate the device\n", slot_id); |
3696 | ret = xhci_alloc_dev(hcd, udev); |
3697 | if (ret == 1) |
3698 | return 0; |
3699 | else |
3700 | return -EINVAL; |
3701 | } |
3702 | |
3703 | if (virt_dev->tt_info) |
3704 | old_active_eps = virt_dev->tt_info->active_eps; |
3705 | |
3706 | if (virt_dev->udev != udev) { |
3707 | /* If the virt_dev and the udev do not match, this virt_dev |
3708 | * may belong to another udev. |
3709 | * Re-allocate the device. |
3710 | */ |
3711 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " |
3712 | "not match the udev. Re-allocate the device\n", |
3713 | slot_id); |
3714 | ret = xhci_alloc_dev(hcd, udev); |
3715 | if (ret == 1) |
3716 | return 0; |
3717 | else |
3718 | return -EINVAL; |
3719 | } |
3720 | |
3721 | /* If device is not setup, there is no point in resetting it */ |
3722 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
3723 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == |
3724 | SLOT_STATE_DISABLED) |
3725 | return 0; |
3726 | |
3727 | trace_xhci_discover_or_reset_device(slot_ctx); |
3728 | |
3729 | xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); |
3730 | /* Allocate the command structure that holds the struct completion. |
3731 | * Assume we're in process context, since the normal device reset |
3732 | * process has to wait for the device anyway. Storage devices are |
3733 | * reset as part of error handling, so use GFP_NOIO instead of |
3734 | * GFP_KERNEL. |
3735 | */ |
3736 | reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO); |
3737 | if (!reset_device_cmd) { |
3738 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); |
3739 | return -ENOMEM; |
3740 | } |
3741 | |
3742 | /* Attempt to submit the Reset Device command to the command ring */ |
3743 | spin_lock_irqsave(&xhci->lock, flags); |
3744 | |
3745 | ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); |
3746 | if (ret) { |
3747 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
3748 | spin_unlock_irqrestore(&xhci->lock, flags); |
3749 | goto command_cleanup; |
3750 | } |
3751 | xhci_ring_cmd_db(xhci); |
3752 | spin_unlock_irqrestore(&xhci->lock, flags); |
3753 | |
3754 | /* Wait for the Reset Device command to finish */ |
3755 | wait_for_completion(reset_device_cmd->completion); |
3756 | |
3757 | /* The Reset Device command can't fail, according to the 0.95/0.96 spec, |
3758 | * unless we tried to reset a slot ID that wasn't enabled, |
3759 | * or the device wasn't in the addressed or configured state. |
3760 | */ |
3761 | ret = reset_device_cmd->status; |
3762 | switch (ret) { |
3763 | case COMP_COMMAND_ABORTED: |
3764 | case COMP_COMMAND_RING_STOPPED: |
3765 | xhci_warn(xhci, "Timeout waiting for reset device command\n"); |
3766 | ret = -ETIME; |
3767 | goto command_cleanup; |
3768 | case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */ |
3769 | case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */ |
3770 | xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", |
3771 | slot_id, |
3772 | xhci_get_slot_state(xhci, virt_dev->out_ctx)); |
3773 | xhci_dbg(xhci, "Not freeing device rings.\n"); |
3774 | /* Don't treat this as an error. May change my mind later. */ |
3775 | ret = 0; |
3776 | goto command_cleanup; |
3777 | case COMP_SUCCESS: |
3778 | xhci_dbg(xhci, "Successful reset device command.\n"); |
3779 | break; |
3780 | default: |
3781 | if (xhci_is_vendor_info_code(xhci, ret)) |
3782 | break; |
3783 | xhci_warn(xhci, "Unknown completion code %u for " |
3784 | "reset device command.\n", ret); |
3785 | ret = -EINVAL; |
3786 | goto command_cleanup; |
3787 | } |
3788 | |
3789 | /* Free up host controller endpoint resources */ |
3790 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
3791 | spin_lock_irqsave(&xhci->lock, flags); |
3792 | /* Don't delete the default control endpoint resources */ |
3793 | xhci_free_device_endpoint_resources(xhci, virt_dev, false); |
3794 | spin_unlock_irqrestore(&xhci->lock, flags); |
3795 | } |
3796 | |
3797 | /* Everything but endpoint 0 is disabled, so free the rings. */ |
3798 | for (i = 1; i < 31; i++) { |
3799 | struct xhci_virt_ep *ep = &virt_dev->eps[i]; |
3800 | |
3801 | if (ep->ep_state & EP_HAS_STREAMS) { |
3802 | xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", |
3803 | xhci_get_endpoint_address(i)); |
3804 | xhci_free_stream_info(xhci, ep->stream_info); |
3805 | ep->stream_info = NULL; |
3806 | ep->ep_state &= ~EP_HAS_STREAMS; |
3807 | } |
3808 | |
3809 | if (ep->ring) { |
3810 | xhci_debugfs_remove_endpoint(xhci, virt_dev, i); |
3811 | xhci_free_endpoint_ring(xhci, virt_dev, i); |
3812 | } |
3813 | if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) |
3814 | xhci_drop_ep_from_interval_table(xhci, |
3815 | &virt_dev->eps[i].bw_info, |
3816 | virt_dev->bw_table, |
3817 | udev, |
3818 | &virt_dev->eps[i], |
3819 | virt_dev->tt_info); |
3820 | xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); |
3821 | } |
3822 | /* If necessary, update the number of active TTs on this root port */ |
3823 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); |
3824 | virt_dev->flags = 0; |
3825 | ret = 0; |
3826 | |
3827 | command_cleanup: |
3828 | xhci_free_command(xhci, reset_device_cmd); |
3829 | return ret; |
3830 | } |
3831 | |
3832 | /* |
3833 | * At this point, the struct usb_device is about to go away, the device has |
3834 | * disconnected, and all traffic has been stopped and the endpoints have been |
3835 | * disabled. Free any HC data structures associated with that device. |
3836 | */ |
3837 | static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) |
3838 | { |
3839 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
3840 | struct xhci_virt_device *virt_dev; |
3841 | struct xhci_slot_ctx *slot_ctx; |
3842 | unsigned long flags; |
3843 | int i, ret; |
3844 | |
3845 | /* |
3846 | * We called pm_runtime_get_noresume when the device was attached. |
3847 | * Decrement the counter here to allow controller to runtime suspend |
3848 | * if no devices remain. |
3849 | */ |
3850 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
3851 | pm_runtime_put_noidle(hcd->self.controller); |
3852 | |
3853 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
3854 | /* If the host is halted due to driver unload, we still need to free the |
3855 | * device. |
3856 | */ |
3857 | if (ret <= 0 && ret != -ENODEV) |
3858 | return; |
3859 | |
3860 | virt_dev = xhci->devs[udev->slot_id]; |
3861 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
3862 | trace_xhci_free_dev(slot_ctx); |
3863 | |
3864 | /* Stop any wayward timer functions (which may grab the lock) */ |
3865 | for (i = 0; i < 31; i++) |
3866 | virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; |
3867 | virt_dev->udev = NULL; |
3868 | xhci_disable_slot(xhci, udev->slot_id); |
3869 | |
3870 | spin_lock_irqsave(&xhci->lock, flags); |
3871 | xhci_free_virt_device(xhci, udev->slot_id); |
3872 | spin_unlock_irqrestore(&xhci->lock, flags); |
3873 | |
3874 | } |
3875 | |
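/*
 * Issue a Disable Slot command and wait for it to complete, removing
 * the slot's debugfs entries first. Bail out with -ENODEV if the host
 * controller is dead or halted, since the command ring would never be
 * serviced.
 */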
3876 | int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) |
3877 | { |
3878 | struct xhci_command *command; |
3879 | unsigned long flags; |
3880 | u32 state; |
3881 | int ret; |
3882 | |
3883 | command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
3884 | if (!command) |
3885 | return -ENOMEM; |
3886 | |
3887 | xhci_debugfs_remove_slot(xhci, slot_id); |
3888 | |
3889 | spin_lock_irqsave(&xhci->lock, flags); |
3890 | /* Don't disable the slot if the host controller is dead. */ |
3891 | state = readl(&xhci->op_regs->status); |
3892 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || |
3893 | (xhci->xhc_state & XHCI_STATE_HALTED)) { |
3894 | spin_unlock_irqrestore(&xhci->lock, flags); |
3895 | kfree(command); |
3896 | return -ENODEV; |
3897 | } |
3898 | |
3899 | ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, |
3900 | slot_id); |
3901 | if (ret) { |
3902 | spin_unlock_irqrestore(&xhci->lock, flags); |
3903 | kfree(command); |
3904 | return ret; |
3905 | } |
3906 | xhci_ring_cmd_db(xhci); |
3907 | spin_unlock_irqrestore(&xhci->lock, flags); |
3908 | |
3909 | wait_for_completion(command->completion); |
3910 | |
3911 | if (command->status != COMP_SUCCESS) |
3912 | xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n", |
3913 | slot_id, command->status); |
3914 | |
3915 | xhci_free_command(xhci, command); |
3916 | |
3917 | return 0; |
3918 | } |
3919 | |
3920 | /* |
3921 | * Checks if we have enough host controller resources for the default control |
3922 | * endpoint. |
3923 | * |
3924 | * Must be called with xhci->lock held. |
3925 | */ |
3926 | static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) |
3927 | { |
3928 | if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { |
3929 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
3930 | "Not enough ep ctxs: " |
3931 | "%u active, need to add 1, limit is %u.", |
3932 | xhci->num_active_eps, xhci->limit_active_eps); |
3933 | return -ENOMEM; |
3934 | } |
3935 | xhci->num_active_eps += 1; |
3936 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
3937 | "Adding 1 ep ctx, %u now active.", |
3938 | xhci->num_active_eps); |
3939 | return 0; |
3940 | } |
3941 | |
3942 | |
3943 | /* |
3944 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command |
3945 | * timed out, or allocating memory failed. Returns 1 on success. |
3946 | */ |
3947 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) |
3948 | { |
3949 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
3950 | struct xhci_virt_device *vdev; |
3951 | struct xhci_slot_ctx *slot_ctx; |
3952 | unsigned long flags; |
3953 | int ret, slot_id; |
3954 | struct xhci_command *command; |
3955 | |
3956 | command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
3957 | if (!command) |
3958 | return 0; |
3959 | |
3960 | spin_lock_irqsave(&xhci->lock, flags); |
3961 | ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); |
3962 | if (ret) { |
3963 | spin_unlock_irqrestore(&xhci->lock, flags); |
3964 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
3965 | xhci_free_command(xhci, command); |
3966 | return 0; |
3967 | } |
3968 | xhci_ring_cmd_db(xhci); |
3969 | spin_unlock_irqrestore(lock: &xhci->lock, flags); |
3970 | |
3971 | wait_for_completion(command->completion); |
3972 | slot_id = command->slot_id; |
3973 | |
3974 | if (!slot_id || command->status != COMP_SUCCESS) { |
3975 | xhci_err(xhci, "Error while assigning device slot ID: %s\n", |
3976 | xhci_trb_comp_code_string(command->status)); |
3977 | xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", |
3978 | HCS_MAX_SLOTS( |
3979 | readl(&xhci->cap_regs->hcs_params1))); |
3980 | xhci_free_command(xhci, command); |
3981 | return 0; |
3982 | } |
3983 | |
3984 | xhci_free_command(xhci, command); |
3985 | |
3986 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
3987 | spin_lock_irqsave(&xhci->lock, flags); |
3988 | ret = xhci_reserve_host_control_ep_resources(xhci); |
3989 | if (ret) { |
3990 | spin_unlock_irqrestore(&xhci->lock, flags); |
3991 | xhci_warn(xhci, "Not enough host resources, " |
3992 | "active endpoint contexts = %u\n", |
3993 | xhci->num_active_eps); |
3994 | goto disable_slot; |
3995 | } |
3996 | spin_unlock_irqrestore(&xhci->lock, flags); |
3997 | } |
3998 | /* Use GFP_NOIO, since this function can be called from |
3999 | * xhci_discover_or_reset_device(), which may be called as part of |
4000 | * mass storage driver error handling. |
4001 | */ |
4002 | if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { |
4003 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); |
4004 | goto disable_slot; |
4005 | } |
4006 | vdev = xhci->devs[slot_id]; |
4007 | slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); |
4008 | trace_xhci_alloc_dev(slot_ctx); |
4009 | |
4010 | udev->slot_id = slot_id; |
4011 | |
4012 | xhci_debugfs_create_slot(xhci, slot_id); |
4013 | |
4014 | /* |
4015 | * If resetting upon resume, we can't put the controller into runtime |
4016 | * suspend if there is a device attached. |
4017 | */ |
4018 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
4019 | pm_runtime_get_noresume(hcd->self.controller); |
4020 | |
4021 | /* Is this a LS or FS device under a HS hub? */ |
4022 | /* Hub or peripheral? */ |
4023 | return 1; |
4024 | |
4025 | disable_slot: |
4026 | xhci_disable_slot(xhci, udev->slot_id); |
4027 | xhci_free_virt_device(xhci, udev->slot_id); |
4028 | |
4029 | return 0; |
4030 | } |
4031 | |
4032 | /* |
4033 | * Issue an Address Device command and optionally send a corresponding |
4034 | * SetAddress request to the device. |
4035 | */ |
4036 | static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, |
4037 | enum xhci_setup_dev setup) |
4038 | { |
4039 | const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address"; |
4040 | unsigned long flags; |
4041 | struct xhci_virt_device *virt_dev; |
4042 | int ret = 0; |
4043 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4044 | struct xhci_slot_ctx *slot_ctx; |
4045 | struct xhci_input_control_ctx *ctrl_ctx; |
4046 | u64 temp_64; |
4047 | struct xhci_command *command = NULL; |
4048 | |
4049 | mutex_lock(&xhci->mutex); |
4050 | |
4051 | if (xhci->xhc_state) { /* dying, removing or halted */ |
4052 | ret = -ESHUTDOWN; |
4053 | goto out; |
4054 | } |
4055 | |
4056 | if (!udev->slot_id) { |
4057 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4058 | "Bad Slot ID %d", udev->slot_id); |
4059 | ret = -EINVAL; |
4060 | goto out; |
4061 | } |
4062 | |
4063 | virt_dev = xhci->devs[udev->slot_id]; |
4064 | |
4065 | if (WARN_ON(!virt_dev)) { |
4066 | /* |
4067 | * In plug/unplug torture test with an NEC controller, |
4068 | * a NULL-pointer dereference was observed once due to virt_dev being NULL. |
4069 | * Print useful debug rather than crash if it is observed again! |
4070 | */ |
4071 | xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", |
4072 | udev->slot_id); |
4073 | ret = -EINVAL; |
4074 | goto out; |
4075 | } |
4076 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
4077 | trace_xhci_setup_device_slot(slot_ctx); |
4078 | |
4079 | if (setup == SETUP_CONTEXT_ONLY) { |
4080 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == |
4081 | SLOT_STATE_DEFAULT) { |
4082 | xhci_dbg(xhci, "Slot already in default state\n" ); |
4083 | goto out; |
4084 | } |
4085 | } |
4086 | |
4087 | command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
4088 | if (!command) { |
4089 | ret = -ENOMEM; |
4090 | goto out; |
4091 | } |
4092 | |
4093 | command->in_ctx = virt_dev->in_ctx; |
4094 | |
4095 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
4096 | ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); |
4097 | if (!ctrl_ctx) { |
4098 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
4099 | __func__); |
4100 | ret = -EINVAL; |
4101 | goto out; |
4102 | } |
4103 | /* |
4104 | * If this is the first Set Address since device plug-in or |
4105 | * virt_device reallocation after a resume with an xHCI power loss, |
4106 | * then set up the slot context. |
4107 | */ |
4108 | if (!slot_ctx->dev_info) |
4109 | xhci_setup_addressable_virt_dev(xhci, udev); |
4110 | /* Otherwise, update the control endpoint ring enqueue pointer. */ |
4111 | else |
4112 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); |
4113 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); |
4114 | ctrl_ctx->drop_flags = 0; |
4115 | |
4116 | trace_xhci_address_ctx(xhci, virt_dev->in_ctx, |
4117 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
4118 | |
4119 | trace_xhci_address_ctrl_ctx(ctrl_ctx); |
4120 | spin_lock_irqsave(&xhci->lock, flags); |
4121 | trace_xhci_setup_device(virt_dev); |
4122 | ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, |
4123 | udev->slot_id, setup); |
4124 | if (ret) { |
4125 | spin_unlock_irqrestore(&xhci->lock, flags); |
4126 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4127 | "FIXME: allocate a command ring segment"); |
4128 | goto out; |
4129 | } |
4130 | xhci_ring_cmd_db(xhci); |
4131 | spin_unlock_irqrestore(&xhci->lock, flags); |
4132 | |
4133 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ |
4134 | wait_for_completion(command->completion); |
4135 | |
4136 | /* FIXME: From section 4.3.4: "Software shall be responsible for timing |
4137 | * the SetAddress() 'recovery interval' required by USB and aborting the |
4138 | * command on a timeout." |
4139 | */ |
4140 | switch (command->status) { |
4141 | case COMP_COMMAND_ABORTED: |
4142 | case COMP_COMMAND_RING_STOPPED: |
4143 | xhci_warn(xhci, "Timeout while waiting for setup device command\n"); |
4144 | ret = -ETIME; |
4145 | break; |
4146 | case COMP_CONTEXT_STATE_ERROR: |
4147 | case COMP_SLOT_NOT_ENABLED_ERROR: |
4148 | xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", |
4149 | act, udev->slot_id); |
4150 | ret = -EINVAL; |
4151 | break; |
4152 | case COMP_USB_TRANSACTION_ERROR: |
4153 | dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); |
4154 | |
4155 | mutex_unlock(&xhci->mutex); |
4156 | ret = xhci_disable_slot(xhci, udev->slot_id); |
4157 | xhci_free_virt_device(xhci, udev->slot_id); |
4158 | if (!ret) |
4159 | xhci_alloc_dev(hcd, udev); |
4160 | kfree(command->completion); |
4161 | kfree(command); |
4162 | return -EPROTO; |
4163 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
4164 | dev_warn(&udev->dev, |
4165 | "ERROR: Incompatible device for setup %s command\n" , act); |
4166 | ret = -ENODEV; |
4167 | break; |
4168 | case COMP_SUCCESS: |
4169 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4170 | "Successful setup %s command", act); |
4171 | break; |
4172 | default: |
4173 | xhci_err(xhci, |
4174 | "ERROR: unexpected setup %s command completion code 0x%x.\n" , |
4175 | act, command->status); |
4176 | trace_xhci_address_ctx(xhci, ctx: virt_dev->out_ctx, ep_num: 1); |
4177 | ret = -EINVAL; |
4178 | break; |
4179 | } |
4180 | if (ret) |
4181 | goto out; |
4182 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
4183 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4184 | "Op regs DCBAA ptr = %#016llx", temp_64); |
4185 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4186 | "Slot ID %d dcbaa entry @%p = %#016llx", |
4187 | udev->slot_id, |
4188 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], |
4189 | (unsigned long long) |
4190 | le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); |
4191 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4192 | "Output Context DMA address = %#08llx", |
4193 | (unsigned long long)virt_dev->out_ctx->dma); |
4194 | trace_xhci_address_ctx(xhci, virt_dev->in_ctx, |
4195 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
4196 | /* |
4197 | * USB core uses address 1 for the roothubs, so we add one to the |
4198 | * address given back to us by the HC. |
4199 | */ |
4200 | trace_xhci_address_ctx(xhci, virt_dev->out_ctx, |
4201 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
4202 | /* Zero the input context control for later use */ |
4203 | ctrl_ctx->add_flags = 0; |
4204 | ctrl_ctx->drop_flags = 0; |
4205 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
4206 | udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); |
4207 | |
4208 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4209 | "Internal device address = %d", |
4210 | le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); |
4211 | out: |
4212 | mutex_unlock(&xhci->mutex); |
4213 | if (command) { |
4214 | kfree(command->completion); |
4215 | kfree(command); |
4216 | } |
4217 | return ret; |
4218 | } |
4219 | |
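/*
 * The two wrappers below differ only in whether a SET_ADDRESS request
 * actually goes out on the bus: SETUP_CONTEXT_ADDRESS assigns a USB
 * address, while SETUP_CONTEXT_ONLY sets the Block Set Address Request
 * (BSR) flag so the slot only transitions to the Default state.
 */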
4220 | static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) |
4221 | { |
4222 | return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS); |
4223 | } |
4224 | |
4225 | static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) |
4226 | { |
4227 | return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY); |
4228 | } |
4229 | |
4230 | /* |
4231 | * Translate the port index into the real index in the HW port status |
4232 | * registers. Calculate the offset between the port's PORTSC register |
4233 | * and the port status base, then divide by the number of registers |
4234 | * per port to get the real index. Raw port numbers are 1-based. |
4235 | */ |
4236 | int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) |
4237 | { |
4238 | struct xhci_hub *rhub; |
4239 | |
4240 | rhub = xhci_get_rhub(hcd); |
4241 | return rhub->ports[port1 - 1]->hw_portnum + 1; |
4242 | } |
4243 | |
4244 | /* |
4245 | * Issue an Evaluate Context command to change the Maximum Exit Latency in the |
4246 | * slot context. If that succeeds, store the new MEL in the xhci_virt_device. |
4247 | */ |
4248 | static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, |
4249 | struct usb_device *udev, u16 max_exit_latency) |
4250 | { |
4251 | struct xhci_virt_device *virt_dev; |
4252 | struct xhci_command *command; |
4253 | struct xhci_input_control_ctx *ctrl_ctx; |
4254 | struct xhci_slot_ctx *slot_ctx; |
4255 | unsigned long flags; |
4256 | int ret; |
4257 | |
4258 | command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL); |
4259 | if (!command) |
4260 | return -ENOMEM; |
4261 | |
4262 | spin_lock_irqsave(&xhci->lock, flags); |
4263 | |
4264 | virt_dev = xhci->devs[udev->slot_id]; |
4265 | |
4266 | /* |
4267 | * virt_dev might not exist yet if the xHC resumed from hibernate (S4) and |
4268 | * was re-initialized. Exit latency will be set later, after |
4269 | * hub_port_finish_reset() is done and xhci->devs[] are re-allocated. |
4270 | */ |
4271 | |
4272 | if (!virt_dev || max_exit_latency == virt_dev->current_mel) { |
4273 | spin_unlock_irqrestore(&xhci->lock, flags); |
4274 | xhci_free_command(xhci, command); |
4275 | return 0; |
4276 | } |
4277 | |
4278 | /* Attempt to issue an Evaluate Context command to change the MEL. */ |
4279 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
4280 | if (!ctrl_ctx) { |
4281 | spin_unlock_irqrestore(&xhci->lock, flags); |
4282 | xhci_free_command(xhci, command); |
4283 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
4284 | __func__); |
4285 | return -ENOMEM; |
4286 | } |
4287 | |
4288 | xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); |
4289 | spin_unlock_irqrestore(&xhci->lock, flags); |
4290 | |
4291 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
4292 | slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); |
4293 | slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); |
4294 | slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); |
4295 | slot_ctx->dev_state = 0; |
4296 | |
4297 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
4298 | "Set up evaluate context for LPM MEL change."); |
4299 | |
4300 | /* Issue and wait for the evaluate context command. */ |
4301 | ret = xhci_configure_endpoint(xhci, udev, command, |
4302 | true, true); |
4303 | |
4304 | if (!ret) { |
4305 | spin_lock_irqsave(&xhci->lock, flags); |
4306 | virt_dev->current_mel = max_exit_latency; |
4307 | spin_unlock_irqrestore(&xhci->lock, flags); |
4308 | } |
4309 | |
4310 | xhci_free_command(xhci, command); |
4311 | |
4312 | return ret; |
4313 | } |
4314 | |
4315 | #ifdef CONFIG_PM |
4316 | |
4317 | /* BESL to HIRD Encoding array for USB2 LPM */ |
4318 | static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, |
4319 | 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; |
4320 | |
4321 | /* Calculate HIRD/BESL for USB2 PORTPMSC */ |
4322 | static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, |
4323 | struct usb_device *udev) |
4324 | { |
4325 | int u2del, besl, besl_host; |
4326 | int besl_device = 0; |
4327 | u32 field; |
4328 | |
4329 | u2del = HCS_U2_LATENCY(xhci->hcs_params3); |
4330 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
4331 | |
4332 | if (field & USB_BESL_SUPPORT) { |
4333 | for (besl_host = 0; besl_host < 16; besl_host++) { |
4334 | if (xhci_besl_encoding[besl_host] >= u2del) |
4335 | break; |
4336 | } |
4337 | /* Use baseline BESL value as default */ |
4338 | if (field & USB_BESL_BASELINE_VALID) |
4339 | besl_device = USB_GET_BESL_BASELINE(field); |
4340 | else if (field & USB_BESL_DEEP_VALID) |
4341 | besl_device = USB_GET_BESL_DEEP(field); |
4342 | } else { |
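/*
 * No BESL support: derive a host-side value from the U2 exit
 * latency alone; e.g. u2del = 400 (microseconds) gives
 * besl_host = (400 - 51) / 75 + 1 = 5, which the encoding
 * table above maps back to 500 microseconds.
 */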
4343 | if (u2del <= 50) |
4344 | besl_host = 0; |
4345 | else |
4346 | besl_host = (u2del - 51) / 75 + 1; |
4347 | } |
4348 | |
4349 | besl = besl_host + besl_device; |
4350 | if (besl > 15) |
4351 | besl = 15; |
4352 | |
4353 | return besl; |
4354 | } |
4355 | |
4356 | /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ |
4357 | static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) |
4358 | { |
4359 | u32 field; |
4360 | int l1; |
4361 | int besld = 0; |
4362 | int hirdm = 0; |
4363 | |
4364 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
4365 | |
4366 | /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ |
4367 | l1 = udev->l1_params.timeout / 256; |
4368 | |
4369 | /* device has preferred BESLD */ |
4370 | if (field & USB_BESL_DEEP_VALID) { |
4371 | besld = USB_GET_BESL_DEEP(field); |
4372 | hirdm = 1; |
4373 | } |
4374 | |
4375 | return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); |
4376 | } |
4377 | |
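/*
 * Enable or disable hardware-driven USB2 L1 entry (LPM) for one root
 * hub port. PORTPMSC is programmed with the HIRD/BESL value, remote
 * wake enable and device slot ID; PORT_HLE is set last, in a separate
 * write, so the other parameters are in place before hardware LPM is
 * turned on.
 */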
4378 | static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
4379 | struct usb_device *udev, int enable) |
4380 | { |
4381 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4382 | struct xhci_port **ports; |
4383 | __le32 __iomem *pm_addr, *hlpm_addr; |
4384 | u32 pm_val, hlpm_val, field; |
4385 | unsigned int port_num; |
4386 | unsigned long flags; |
4387 | int hird, exit_latency; |
4388 | int ret; |
4389 | |
4390 | if (xhci->quirks & XHCI_HW_LPM_DISABLE) |
4391 | return -EPERM; |
4392 | |
4393 | if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || |
4394 | !udev->lpm_capable) |
4395 | return -EPERM; |
4396 | |
4397 | if (!udev->parent || udev->parent->parent || |
4398 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) |
4399 | return -EPERM; |
4400 | |
4401 | if (udev->usb2_hw_lpm_capable != 1) |
4402 | return -EPERM; |
4403 | |
4404 | spin_lock_irqsave(&xhci->lock, flags); |
4405 | |
4406 | ports = xhci->usb2_rhub.ports; |
4407 | port_num = udev->portnum - 1; |
4408 | pm_addr = ports[port_num]->addr + PORTPMSC; |
4409 | pm_val = readl(pm_addr); |
4410 | hlpm_addr = ports[port_num]->addr + PORTHLPMC; |
4411 | |
4412 | xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", |
4413 | enable ? "enable" : "disable", port_num + 1); |
4414 | |
4415 | if (enable) { |
4416 | /* Host supports BESL timeout instead of HIRD */ |
4417 | if (udev->usb2_hw_lpm_besl_capable) { |
4418 | /* if device doesn't have a preferred BESL value use a |
4419 | * default one which works with mixed HIRD and BESL |
4420 | * systems. See XHCI_DEFAULT_BESL definition in xhci.h |
4421 | */ |
4422 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
4423 | if ((field & USB_BESL_SUPPORT) && |
4424 | (field & USB_BESL_BASELINE_VALID)) |
4425 | hird = USB_GET_BESL_BASELINE(field); |
4426 | else |
4427 | hird = udev->l1_params.besl; |
4428 | |
4429 | exit_latency = xhci_besl_encoding[hird]; |
4430 | spin_unlock_irqrestore(&xhci->lock, flags); |
4431 | |
4432 | ret = xhci_change_max_exit_latency(xhci, udev, |
4433 | exit_latency); |
4434 | if (ret < 0) |
4435 | return ret; |
4436 | spin_lock_irqsave(&xhci->lock, flags); |
4437 | |
4438 | hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); |
			writel(hlpm_val, hlpm_addr);
			/* flush write */
			readl(hlpm_addr);
4442 | } else { |
4443 | hird = xhci_calculate_hird_besl(xhci, udev); |
4444 | } |
4445 | |
4446 | pm_val &= ~PORT_HIRD_MASK; |
4447 | pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); |
		writel(pm_val, pm_addr);
		pm_val = readl(pm_addr);
		pm_val |= PORT_HLE;
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
4454 | } else { |
4455 | pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); |
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
4459 | if (udev->usb2_hw_lpm_besl_capable) { |
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_change_max_exit_latency(xhci, udev, 0);
4462 | readl_poll_timeout(ports[port_num]->addr, pm_val, |
4463 | (pm_val & PORT_PLS_MASK) == XDEV_U0, |
4464 | 100, 10000); |
4465 | return 0; |
4466 | } |
4467 | } |
4468 | |
	spin_unlock_irqrestore(&xhci->lock, flags);
4470 | return 0; |
4471 | } |
4472 | |
/* Check if a USB2 port supports a given extended capability protocol.
 * Only USB2 ports' extended protocol capability values are cached.
 * Return 1 if the capability is supported.
 */
4477 | static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, |
4478 | unsigned capability) |
4479 | { |
4480 | u32 port_offset, port_count; |
4481 | int i; |
4482 | |
4483 | for (i = 0; i < xhci->num_ext_caps; i++) { |
4484 | if (xhci->ext_caps[i] & capability) { |
			/* port offsets start at 1 */
4486 | port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; |
4487 | port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); |
4488 | if (port >= port_offset && |
4489 | port < port_offset + port_count) |
4490 | return 1; |
4491 | } |
4492 | } |
4493 | return 0; |
4494 | } |
4495 | |
4496 | static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
4497 | { |
4498 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4499 | int portnum = udev->portnum - 1; |
4500 | |
4501 | if (hcd->speed >= HCD_USB3 || !udev->lpm_capable) |
4502 | return 0; |
4503 | |
	/* So far we only support LPM for non-hub devices connected directly to the root hub */
4505 | if (!udev->parent || udev->parent->parent || |
4506 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) |
4507 | return 0; |
4508 | |
4509 | if (xhci->hw_lpm_support == 1 && |
4510 | xhci_check_usb2_port_capability( |
				xhci, portnum, XHCI_HLC)) {
4512 | udev->usb2_hw_lpm_capable = 1; |
4513 | udev->l1_params.timeout = XHCI_L1_TIMEOUT; |
4514 | udev->l1_params.besl = XHCI_DEFAULT_BESL; |
		if (xhci_check_usb2_port_capability(xhci, portnum,
					XHCI_BLC))
4517 | udev->usb2_hw_lpm_besl_capable = 1; |
4518 | } |
4519 | |
4520 | return 0; |
4521 | } |
4522 | |
4523 | /*---------------------- USB 3.0 Link PM functions ------------------------*/ |
4524 | |
4525 | /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */ |
4526 | static unsigned long long xhci_service_interval_to_ns( |
4527 | struct usb_endpoint_descriptor *desc) |
4528 | { |
4529 | return (1ULL << (desc->bInterval - 1)) * 125 * 1000; |
4530 | } |
4531 | |
4532 | static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, |
4533 | enum usb3_link_state state) |
4534 | { |
4535 | unsigned long long sel; |
4536 | unsigned long long pel; |
4537 | unsigned int max_sel_pel; |
4538 | char *state_name; |
4539 | |
4540 | switch (state) { |
4541 | case USB3_LPM_U1: |
4542 | /* Convert SEL and PEL stored in nanoseconds to microseconds */ |
4543 | sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); |
4544 | pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); |
4545 | max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; |
4546 | state_name = "U1" ; |
4547 | break; |
4548 | case USB3_LPM_U2: |
4549 | sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); |
4550 | pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); |
4551 | max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; |
4552 | state_name = "U2" ; |
4553 | break; |
4554 | default: |
4555 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n" , |
4556 | __func__); |
4557 | return USB3_LPM_DISABLED; |
4558 | } |
4559 | |
4560 | if (sel <= max_sel_pel && pel <= max_sel_pel) |
4561 | return USB3_LPM_DEVICE_INITIATED; |
4562 | |
	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu ms\n",
			state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled due to long PEL %llu ms\n",
			state_name, pel);
4571 | return USB3_LPM_DISABLED; |
4572 | } |
4573 | |
4574 | /* The U1 timeout should be the maximum of the following values: |
4575 | * - For control endpoints, U1 system exit latency (SEL) * 3 |
4576 | * - For bulk endpoints, U1 SEL * 5 |
4577 | * - For interrupt endpoints: |
4578 | * - Notification EPs, U1 SEL * 3 |
4579 | * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) |
4580 | * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) |
4581 | */ |
4582 | static unsigned long long xhci_calculate_intel_u1_timeout( |
4583 | struct usb_device *udev, |
4584 | struct usb_endpoint_descriptor *desc) |
4585 | { |
4586 | unsigned long long timeout_ns; |
4587 | int ep_type; |
4588 | int intr_type; |
4589 | |
	ep_type = usb_endpoint_type(desc);
4591 | switch (ep_type) { |
4592 | case USB_ENDPOINT_XFER_CONTROL: |
4593 | timeout_ns = udev->u1_params.sel * 3; |
4594 | break; |
4595 | case USB_ENDPOINT_XFER_BULK: |
4596 | timeout_ns = udev->u1_params.sel * 5; |
4597 | break; |
4598 | case USB_ENDPOINT_XFER_INT: |
		intr_type = usb_endpoint_interrupt_type(desc);
4600 | if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { |
4601 | timeout_ns = udev->u1_params.sel * 3; |
4602 | break; |
4603 | } |
4604 | /* Otherwise the calculation is the same as isoc eps */ |
4605 | fallthrough; |
4606 | case USB_ENDPOINT_XFER_ISOC: |
4607 | timeout_ns = xhci_service_interval_to_ns(desc); |
4608 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); |
4609 | if (timeout_ns < udev->u1_params.sel * 2) |
4610 | timeout_ns = udev->u1_params.sel * 2; |
4611 | break; |
4612 | default: |
4613 | return 0; |
4614 | } |
4615 | |
4616 | return timeout_ns; |
4617 | } |
4618 | |
4619 | /* Returns the hub-encoded U1 timeout value. */ |
4620 | static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, |
4621 | struct usb_device *udev, |
4622 | struct usb_endpoint_descriptor *desc) |
4623 | { |
4624 | unsigned long long timeout_ns; |
4625 | |
4626 | /* Prevent U1 if service interval is shorter than U1 exit latency */ |
	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
		if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
			dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4630 | return USB3_LPM_DISABLED; |
4631 | } |
4632 | } |
4633 | |
4634 | if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) |
4635 | timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); |
4636 | else |
4637 | timeout_ns = udev->u1_params.sel; |
4638 | |
4639 | /* The U1 timeout is encoded in 1us intervals. |
4640 | * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. |
4641 | */ |
4642 | if (timeout_ns == USB3_LPM_DISABLED) |
4643 | timeout_ns = 1; |
4644 | else |
4645 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); |
4646 | |
4647 | /* If the necessary timeout value is bigger than what we can set in the |
4648 | * USB 3.0 hub, we have to disable hub-initiated U1. |
4649 | */ |
4650 | if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) |
4651 | return timeout_ns; |
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %llu ms\n",
		timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4655 | } |
4656 | |
4657 | /* The U2 timeout should be the maximum of: |
4658 | * - 10 ms (to avoid the bandwidth impact on the scheduler) |
4659 | * - largest bInterval of any active periodic endpoint (to avoid going |
4660 | * into lower power link states between intervals). |
4661 | * - the U2 Exit Latency of the device |
4662 | */ |
4663 | static unsigned long long xhci_calculate_intel_u2_timeout( |
4664 | struct usb_device *udev, |
4665 | struct usb_endpoint_descriptor *desc) |
4666 | { |
4667 | unsigned long long timeout_ns; |
4668 | unsigned long long u2_del_ns; |
4669 | |
4670 | timeout_ns = 10 * 1000 * 1000; |
4671 | |
	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4673 | (xhci_service_interval_to_ns(desc) > timeout_ns)) |
4674 | timeout_ns = xhci_service_interval_to_ns(desc); |
4675 | |
4676 | u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; |
4677 | if (u2_del_ns > timeout_ns) |
4678 | timeout_ns = u2_del_ns; |
4679 | |
4680 | return timeout_ns; |
4681 | } |
4682 | |
4683 | /* Returns the hub-encoded U2 timeout value. */ |
4684 | static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, |
4685 | struct usb_device *udev, |
4686 | struct usb_endpoint_descriptor *desc) |
4687 | { |
4688 | unsigned long long timeout_ns; |
4689 | |
4690 | /* Prevent U2 if service interval is shorter than U2 exit latency */ |
	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
		if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
			dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4694 | return USB3_LPM_DISABLED; |
4695 | } |
4696 | } |
4697 | |
4698 | if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) |
4699 | timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); |
4700 | else |
4701 | timeout_ns = udev->u2_params.sel; |
4702 | |
4703 | /* The U2 timeout is encoded in 256us intervals */ |
4704 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); |
4705 | /* If the necessary timeout value is bigger than what we can set in the |
4706 | * USB 3.0 hub, we have to disable hub-initiated U2. |
4707 | */ |
4708 | if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) |
4709 | return timeout_ns; |
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %llu ms\n",
		timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4713 | } |
4714 | |
4715 | static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, |
4716 | struct usb_device *udev, |
4717 | struct usb_endpoint_descriptor *desc, |
4718 | enum usb3_link_state state, |
4719 | u16 *timeout) |
4720 | { |
4721 | if (state == USB3_LPM_U1) |
4722 | return xhci_calculate_u1_timeout(xhci, udev, desc); |
4723 | else if (state == USB3_LPM_U2) |
4724 | return xhci_calculate_u2_timeout(xhci, udev, desc); |
4725 | |
4726 | return USB3_LPM_DISABLED; |
4727 | } |
4728 | |
4729 | static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, |
4730 | struct usb_device *udev, |
4731 | struct usb_endpoint_descriptor *desc, |
4732 | enum usb3_link_state state, |
4733 | u16 *timeout) |
4734 | { |
4735 | u16 alt_timeout; |
4736 | |
4737 | alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, |
4738 | desc, state, timeout); |
4739 | |
4740 | /* If we found we can't enable hub-initiated LPM, and |
4741 | * the U1 or U2 exit latency was too high to allow |
4742 | * device-initiated LPM as well, then we will disable LPM |
4743 | * for this device, so stop searching any further. |
4744 | */ |
4745 | if (alt_timeout == USB3_LPM_DISABLED) { |
4746 | *timeout = alt_timeout; |
4747 | return -E2BIG; |
4748 | } |
4749 | if (alt_timeout > *timeout) |
4750 | *timeout = alt_timeout; |
4751 | return 0; |
4752 | } |
4753 | |
4754 | static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, |
4755 | struct usb_device *udev, |
4756 | struct usb_host_interface *alt, |
4757 | enum usb3_link_state state, |
4758 | u16 *timeout) |
4759 | { |
4760 | int j; |
4761 | |
4762 | for (j = 0; j < alt->desc.bNumEndpoints; j++) { |
4763 | if (xhci_update_timeout_for_endpoint(xhci, udev, |
				&alt->endpoint[j].desc, state, timeout))
4765 | return -E2BIG; |
4766 | } |
4767 | return 0; |
4768 | } |
4769 | |
4770 | static int xhci_check_tier_policy(struct xhci_hcd *xhci, |
4771 | struct usb_device *udev, |
4772 | enum usb3_link_state state) |
4773 | { |
4774 | struct usb_device *parent = udev->parent; |
4775 | int tier = 1; /* roothub is tier1 */ |
4776 | |
4777 | while (parent) { |
4778 | parent = parent->parent; |
4779 | tier++; |
4780 | } |
4781 | |
4782 | if (xhci->quirks & XHCI_INTEL_HOST && tier > 3) |
4783 | goto fail; |
4784 | if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2) |
4785 | goto fail; |
4786 | |
4787 | return 0; |
4788 | fail: |
4789 | dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n" , |
4790 | tier); |
4791 | return -E2BIG; |
4792 | } |
4793 | |
4794 | /* Returns the U1 or U2 timeout that should be enabled. |
4795 | * If the tier check or timeout setting functions return with a non-zero exit |
4796 | * code, that means the timeout value has been finalized and we shouldn't look |
4797 | * at any more endpoints. |
4798 | */ |
4799 | static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, |
4800 | struct usb_device *udev, enum usb3_link_state state) |
4801 | { |
4802 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4803 | struct usb_host_config *config; |
4804 | char *state_name; |
4805 | int i; |
4806 | u16 timeout = USB3_LPM_DISABLED; |
4807 | |
4808 | if (state == USB3_LPM_U1) |
4809 | state_name = "U1" ; |
4810 | else if (state == USB3_LPM_U2) |
4811 | state_name = "U2" ; |
4812 | else { |
4813 | dev_warn(&udev->dev, "Can't enable unknown link state %i\n" , |
4814 | state); |
4815 | return timeout; |
4816 | } |
4817 | |
4818 | /* Gather some information about the currently installed configuration |
4819 | * and alternate interface settings. |
4820 | */ |
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
					     state, &timeout))
4823 | return timeout; |
4824 | |
4825 | config = udev->actconfig; |
4826 | if (!config) |
4827 | return timeout; |
4828 | |
4829 | for (i = 0; i < config->desc.bNumInterfaces; i++) { |
4830 | struct usb_driver *driver; |
4831 | struct usb_interface *intf = config->interface[i]; |
4832 | |
4833 | if (!intf) |
4834 | continue; |
4835 | |
4836 | /* Check if any currently bound drivers want hub-initiated LPM |
4837 | * disabled. |
4838 | */ |
4839 | if (intf->dev.driver) { |
4840 | driver = to_usb_driver(intf->dev.driver); |
4841 | if (driver && driver->disable_hub_initiated_lpm) { |
4842 | dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n" , |
4843 | state_name, driver->name); |
4844 | timeout = xhci_get_timeout_no_hub_lpm(udev, |
4845 | state); |
4846 | if (timeout == USB3_LPM_DISABLED) |
4847 | return timeout; |
4848 | } |
4849 | } |
4850 | |
4851 | /* Not sure how this could happen... */ |
4852 | if (!intf->cur_altsetting) |
4853 | continue; |
4854 | |
4855 | if (xhci_update_timeout_for_interface(xhci, udev, |
					intf->cur_altsetting,
					state, &timeout))
4858 | return timeout; |
4859 | } |
4860 | return timeout; |
4861 | } |
4862 | |
4863 | static int calculate_max_exit_latency(struct usb_device *udev, |
4864 | enum usb3_link_state state_changed, |
4865 | u16 hub_encoded_timeout) |
4866 | { |
4867 | unsigned long long u1_mel_us = 0; |
4868 | unsigned long long u2_mel_us = 0; |
4869 | unsigned long long mel_us = 0; |
4870 | bool disabling_u1; |
4871 | bool disabling_u2; |
4872 | bool enabling_u1; |
4873 | bool enabling_u2; |
4874 | |
4875 | disabling_u1 = (state_changed == USB3_LPM_U1 && |
4876 | hub_encoded_timeout == USB3_LPM_DISABLED); |
4877 | disabling_u2 = (state_changed == USB3_LPM_U2 && |
4878 | hub_encoded_timeout == USB3_LPM_DISABLED); |
4879 | |
4880 | enabling_u1 = (state_changed == USB3_LPM_U1 && |
4881 | hub_encoded_timeout != USB3_LPM_DISABLED); |
4882 | enabling_u2 = (state_changed == USB3_LPM_U2 && |
4883 | hub_encoded_timeout != USB3_LPM_DISABLED); |
4884 | |
4885 | /* If U1 was already enabled and we're not disabling it, |
4886 | * or we're going to enable U1, account for the U1 max exit latency. |
4887 | */ |
4888 | if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || |
4889 | enabling_u1) |
4890 | u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); |
4891 | if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || |
4892 | enabling_u2) |
4893 | u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); |
4894 | |
4895 | mel_us = max(u1_mel_us, u2_mel_us); |
4896 | |
4897 | /* xHCI host controller max exit latency field is only 16 bits wide. */ |
4898 | if (mel_us > MAX_EXIT) { |
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus is too big.\n",
			 mel_us);
4901 | return -E2BIG; |
4902 | } |
4903 | return mel_us; |
4904 | } |
4905 | |
4906 | /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ |
4907 | static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
4908 | struct usb_device *udev, enum usb3_link_state state) |
4909 | { |
4910 | struct xhci_hcd *xhci; |
4911 | struct xhci_port *port; |
4912 | u16 hub_encoded_timeout; |
4913 | int mel; |
4914 | int ret; |
4915 | |
4916 | xhci = hcd_to_xhci(hcd); |
4917 | /* The LPM timeout values are pretty host-controller specific, so don't |
4918 | * enable hub-initiated timeouts unless the vendor has provided |
4919 | * information about their timeout algorithm. |
4920 | */ |
4921 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || |
4922 | !xhci->devs[udev->slot_id]) |
4923 | return USB3_LPM_DISABLED; |
4924 | |
4925 | if (xhci_check_tier_policy(xhci, udev, state) < 0) |
4926 | return USB3_LPM_DISABLED; |
4927 | |
4928 | /* If connected to root port then check port can handle lpm */ |
4929 | if (udev->parent && !udev->parent->parent) { |
4930 | port = xhci->usb3_rhub.ports[udev->portnum - 1]; |
4931 | if (port->lpm_incapable) |
4932 | return USB3_LPM_DISABLED; |
4933 | } |
4934 | |
4935 | hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); |
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4937 | if (mel < 0) { |
4938 | /* Max Exit Latency is too big, disable LPM. */ |
4939 | hub_encoded_timeout = USB3_LPM_DISABLED; |
4940 | mel = 0; |
4941 | } |
4942 | |
	ret = xhci_change_max_exit_latency(xhci, udev, mel);
4944 | if (ret) |
4945 | return ret; |
4946 | return hub_encoded_timeout; |
4947 | } |
4948 | |
4949 | static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
4950 | struct usb_device *udev, enum usb3_link_state state) |
4951 | { |
4952 | struct xhci_hcd *xhci; |
4953 | u16 mel; |
4954 | |
4955 | xhci = hcd_to_xhci(hcd); |
4956 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || |
4957 | !xhci->devs[udev->slot_id]) |
4958 | return 0; |
4959 | |
	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
4962 | } |
4963 | #else /* CONFIG_PM */ |
4964 | |
4965 | static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
4966 | struct usb_device *udev, int enable) |
4967 | { |
4968 | return 0; |
4969 | } |
4970 | |
4971 | static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
4972 | { |
4973 | return 0; |
4974 | } |
4975 | |
4976 | static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
4977 | struct usb_device *udev, enum usb3_link_state state) |
4978 | { |
4979 | return USB3_LPM_DISABLED; |
4980 | } |
4981 | |
4982 | static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
4983 | struct usb_device *udev, enum usb3_link_state state) |
4984 | { |
4985 | return 0; |
4986 | } |
4987 | #endif /* CONFIG_PM */ |
4988 | |
4989 | /*-------------------------------------------------------------------------*/ |
4990 | |
4991 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's |
4992 | * internal data structures for the device. |
4993 | */ |
4994 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, |
4995 | struct usb_tt *tt, gfp_t mem_flags) |
4996 | { |
4997 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4998 | struct xhci_virt_device *vdev; |
4999 | struct xhci_command *config_cmd; |
5000 | struct xhci_input_control_ctx *ctrl_ctx; |
5001 | struct xhci_slot_ctx *slot_ctx; |
5002 | unsigned long flags; |
5003 | unsigned think_time; |
5004 | int ret; |
5005 | |
5006 | /* Ignore root hubs */ |
5007 | if (!hdev->parent) |
5008 | return 0; |
5009 | |
5010 | vdev = xhci->devs[hdev->slot_id]; |
5011 | if (!vdev) { |
5012 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n" ); |
5013 | return -EINVAL; |
5014 | } |
5015 | |
	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5017 | if (!config_cmd) |
5018 | return -ENOMEM; |
5019 | |
	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
			  __func__);
		xhci_free_command(xhci, config_cmd);
5025 | return -ENOMEM; |
5026 | } |
5027 | |
5028 | spin_lock_irqsave(&xhci->lock, flags); |
5029 | if (hdev->speed == USB_SPEED_HIGH && |
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
5034 | return -ENOMEM; |
5035 | } |
5036 | |
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5040 | slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); |
5041 | /* |
5042 | * refer to section 6.2.2: MTT should be 0 for full speed hub, |
5043 | * but it may be already set to 1 when setup an xHCI virtual |
5044 | * device, so clear it anyway. |
5045 | */ |
5046 | if (tt->multi) |
5047 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); |
5048 | else if (hdev->speed == USB_SPEED_FULL) |
5049 | slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT); |
5050 | |
5051 | if (xhci->hci_version > 0x95) { |
5052 | xhci_dbg(xhci, "xHCI version %x needs hub " |
5053 | "TT think time and number of ports\n" , |
5054 | (unsigned int) xhci->hci_version); |
5055 | slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); |
5056 | /* Set TT think time - convert from ns to FS bit times. |
5057 | * 0 = 8 FS bit times, 1 = 16 FS bit times, |
5058 | * 2 = 24 FS bit times, 3 = 32 FS bit times. |
5059 | * |
5060 | * xHCI 1.0: this field shall be 0 if the device is not a |
		 * High-speed hub.
5062 | */ |
5063 | think_time = tt->think_time; |
5064 | if (think_time != 0) |
5065 | think_time = (think_time / 666) - 1; |
5066 | if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) |
5067 | slot_ctx->tt_info |= |
5068 | cpu_to_le32(TT_THINK_TIME(think_time)); |
5069 | } else { |
5070 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " |
5071 | "TT think time or number of ports\n" , |
5072 | (unsigned int) xhci->hci_version); |
5073 | } |
5074 | slot_ctx->dev_state = 0; |
	spin_unlock_irqrestore(&xhci->lock, flags);
5076 | |
5077 | xhci_dbg(xhci, "Set up %s for hub device.\n" , |
5078 | (xhci->hci_version > 0x95) ? |
5079 | "configure endpoint" : "evaluate context" ); |
5080 | |
5081 | /* Issue and wait for the configure endpoint or |
5082 | * evaluate context command. |
5083 | */ |
5084 | if (xhci->hci_version > 0x95) |
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
					      false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
					      true, false);
5090 | |
	xhci_free_command(xhci, config_cmd);
5092 | return ret; |
5093 | } |
5094 | EXPORT_SYMBOL_GPL(xhci_update_hub_device); |
5095 | |
5096 | static int xhci_get_frame(struct usb_hcd *hcd) |
5097 | { |
5098 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
5099 | /* EHCI mods by the periodic size. Why? */ |
	return readl(&xhci->run_regs->microframe_index) >> 3;
5101 | } |
5102 | |
5103 | static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd) |
5104 | { |
5105 | xhci->usb2_rhub.hcd = hcd; |
5106 | hcd->speed = HCD_USB2; |
5107 | hcd->self.root_hub->speed = USB_SPEED_HIGH; |
5108 | /* |
5109 | * USB 2.0 roothub under xHCI has an integrated TT, |
5110 | * (rate matching hub) as opposed to having an OHCI/UHCI |
5111 | * companion controller. |
5112 | */ |
5113 | hcd->has_tt = 1; |
5114 | } |
5115 | |
5116 | static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd) |
5117 | { |
5118 | unsigned int minor_rev; |
5119 | |
5120 | /* |
5121 | * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts |
5122 | * should return 0x31 for sbrn, or that the minor revision |
	 * is a two-digit BCD containing minor and sub-minor numbers.
5124 | * This was later clarified in xHCI 1.2. |
5125 | * |
5126 | * Some USB 3.1 capable hosts therefore have sbrn 0x30, and |
5127 | * minor revision set to 0x1 instead of 0x10. |
5128 | */ |
5129 | if (xhci->usb3_rhub.min_rev == 0x1) |
5130 | minor_rev = 1; |
5131 | else |
5132 | minor_rev = xhci->usb3_rhub.min_rev / 0x10; |
5133 | |
5134 | switch (minor_rev) { |
5135 | case 2: |
5136 | hcd->speed = HCD_USB32; |
5137 | hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; |
5138 | hcd->self.root_hub->rx_lanes = 2; |
5139 | hcd->self.root_hub->tx_lanes = 2; |
5140 | hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2; |
5141 | break; |
5142 | case 1: |
5143 | hcd->speed = HCD_USB31; |
5144 | hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; |
5145 | hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1; |
5146 | break; |
5147 | } |
5148 | xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n" , |
5149 | minor_rev, minor_rev ? "Enhanced " : "" ); |
5150 | |
5151 | xhci->usb3_rhub.hcd = hcd; |
5152 | } |
5153 | |
5154 | int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) |
5155 | { |
5156 | struct xhci_hcd *xhci; |
5157 | /* |
5158 | * TODO: Check with DWC3 clients for sysdev according to |
5159 | * quirks |
5160 | */ |
5161 | struct device *dev = hcd->self.sysdev; |
5162 | int retval; |
5163 | |
5164 | /* Accept arbitrarily long scatter-gather lists */ |
5165 | hcd->self.sg_tablesize = ~0; |
5166 | |
5167 | /* support to build packet from discontinuous buffers */ |
5168 | hcd->self.no_sg_constraint = 1; |
5169 | |
5170 | /* XHCI controllers don't stop the ep queue on short packets :| */ |
5171 | hcd->self.no_stop_on_short = 1; |
5172 | |
5173 | xhci = hcd_to_xhci(hcd); |
5174 | |
5175 | if (!usb_hcd_is_primary_hcd(hcd)) { |
5176 | xhci_hcd_init_usb3_data(xhci, hcd); |
5177 | return 0; |
5178 | } |
5179 | |
5180 | mutex_init(&xhci->mutex); |
5181 | xhci->main_hcd = hcd; |
5182 | xhci->cap_regs = hcd->regs; |
5183 | xhci->op_regs = hcd->regs + |
5184 | HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); |
5185 | xhci->run_regs = hcd->regs + |
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5187 | /* Cache read-only capability registers */ |
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5193 | if (xhci->hci_version > 0x100) |
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5195 | |
5196 | /* xhci-plat or xhci-pci might have set max_interrupters already */ |
5197 | if ((!xhci->max_interrupters) || |
5198 | xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1)) |
5199 | xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1); |
5200 | |
5201 | xhci->quirks |= quirks; |
5202 | |
5203 | if (get_quirks) |
5204 | get_quirks(dev, xhci); |
5205 | |
	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk will ignore such
	 * spurious events.
	 */
5210 | if (xhci->hci_version > 0x96) |
5211 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; |
5212 | |
5213 | /* Make sure the HC is halted. */ |
5214 | retval = xhci_halt(xhci); |
5215 | if (retval) |
5216 | return retval; |
5217 | |
5218 | xhci_zero_64b_regs(xhci); |
5219 | |
5220 | xhci_dbg(xhci, "Resetting HCD\n" ); |
5221 | /* Reset the internal HC memory state and registers. */ |
5222 | retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); |
5223 | if (retval) |
5224 | return retval; |
5225 | xhci_dbg(xhci, "Reset complete\n" ); |
5226 | |
5227 | /* |
5228 | * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) |
	 * of HCCPARAMS1 is set to 1. However, the xHCs don't actually support
	 * 64-bit address memory pointers. So, this driver clears the AC64 bit
	 * of xhci->hcc_params to call dma_set_coherent_mask(dev,
	 * DMA_BIT_MASK(32)) in this xhci_gen_setup().
5233 | */ |
5234 | if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) |
5235 | xhci->hcc_params &= ~BIT(0); |
5236 | |
5237 | /* Set dma_mask and coherent_dma_mask to 64-bits, |
5238 | * if xHC supports 64-bit addressing */ |
5239 | if (HCC_64BIT_ADDR(xhci->hcc_params) && |
5240 | !dma_set_mask(dev, DMA_BIT_MASK(64))) { |
5241 | xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n" ); |
5242 | dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); |
5243 | } else { |
5244 | /* |
5245 | * This is to avoid error in cases where a 32-bit USB |
5246 | * controller is used on a 64-bit capable system. |
5247 | */ |
5248 | retval = dma_set_mask(dev, DMA_BIT_MASK(32)); |
5249 | if (retval) |
5250 | return retval; |
5251 | xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n" ); |
5252 | dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); |
5253 | } |
5254 | |
5255 | xhci_dbg(xhci, "Calling HCD init\n" ); |
5256 | /* Initialize HCD and host controller data structures. */ |
5257 | retval = xhci_init(hcd); |
5258 | if (retval) |
5259 | return retval; |
5260 | xhci_dbg(xhci, "Called HCD init\n" ); |
5261 | |
5262 | if (xhci_hcd_is_usb3(hcd)) |
5263 | xhci_hcd_init_usb3_data(xhci, hcd); |
5264 | else |
5265 | xhci_hcd_init_usb2_data(xhci, hcd); |
5266 | |
5267 | xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n" , |
5268 | xhci->hcc_params, xhci->hci_version, xhci->quirks); |
5269 | |
5270 | return 0; |
5271 | } |
5272 | EXPORT_SYMBOL_GPL(xhci_gen_setup); |
5273 | |
5274 | static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd, |
5275 | struct usb_host_endpoint *ep) |
5276 | { |
5277 | struct xhci_hcd *xhci; |
5278 | struct usb_device *udev; |
5279 | unsigned int slot_id; |
5280 | unsigned int ep_index; |
5281 | unsigned long flags; |
5282 | |
5283 | xhci = hcd_to_xhci(hcd); |
5284 | |
5285 | spin_lock_irqsave(&xhci->lock, flags); |
5286 | udev = (struct usb_device *)ep->hcpriv; |
5287 | slot_id = udev->slot_id; |
5288 | ep_index = xhci_get_endpoint_index(&ep->desc); |
5289 | |
5290 | xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT; |
5291 | xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
	spin_unlock_irqrestore(&xhci->lock, flags);
5293 | } |
5294 | |
5295 | static const struct hc_driver xhci_hc_driver = { |
	.description = "xhci-hcd",
	.product_desc = "xHCI Host Controller",
5298 | .hcd_priv_size = sizeof(struct xhci_hcd), |
5299 | |
5300 | /* |
5301 | * generic hardware linkage |
5302 | */ |
5303 | .irq = xhci_irq, |
5304 | .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED | |
5305 | HCD_BH, |
5306 | |
5307 | /* |
5308 | * basic lifecycle operations |
5309 | */ |
5310 | .reset = NULL, /* set in xhci_init_driver() */ |
5311 | .start = xhci_run, |
5312 | .stop = xhci_stop, |
5313 | .shutdown = xhci_shutdown, |
5314 | |
5315 | /* |
5316 | * managing i/o requests and associated device resources |
5317 | */ |
5318 | .map_urb_for_dma = xhci_map_urb_for_dma, |
5319 | .unmap_urb_for_dma = xhci_unmap_urb_for_dma, |
5320 | .urb_enqueue = xhci_urb_enqueue, |
5321 | .urb_dequeue = xhci_urb_dequeue, |
5322 | .alloc_dev = xhci_alloc_dev, |
5323 | .free_dev = xhci_free_dev, |
5324 | .alloc_streams = xhci_alloc_streams, |
5325 | .free_streams = xhci_free_streams, |
5326 | .add_endpoint = xhci_add_endpoint, |
5327 | .drop_endpoint = xhci_drop_endpoint, |
5328 | .endpoint_disable = xhci_endpoint_disable, |
5329 | .endpoint_reset = xhci_endpoint_reset, |
5330 | .check_bandwidth = xhci_check_bandwidth, |
5331 | .reset_bandwidth = xhci_reset_bandwidth, |
5332 | .address_device = xhci_address_device, |
5333 | .enable_device = xhci_enable_device, |
5334 | .update_hub_device = xhci_update_hub_device, |
5335 | .reset_device = xhci_discover_or_reset_device, |
5336 | |
5337 | /* |
5338 | * scheduling support |
5339 | */ |
5340 | .get_frame_number = xhci_get_frame, |
5341 | |
5342 | /* |
5343 | * root hub support |
5344 | */ |
5345 | .hub_control = xhci_hub_control, |
5346 | .hub_status_data = xhci_hub_status_data, |
5347 | .bus_suspend = xhci_bus_suspend, |
5348 | .bus_resume = xhci_bus_resume, |
5349 | .get_resuming_ports = xhci_get_resuming_ports, |
5350 | |
5351 | /* |
5352 | * call back when device connected and addressed |
5353 | */ |
5354 | .update_device = xhci_update_device, |
5355 | .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, |
5356 | .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, |
5357 | .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, |
5358 | .find_raw_port_number = xhci_find_raw_port_number, |
5359 | .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete, |
5360 | }; |
5361 | |
5362 | void xhci_init_driver(struct hc_driver *drv, |
5363 | const struct xhci_driver_overrides *over) |
5364 | { |
5365 | BUG_ON(!over); |
5366 | |
5367 | /* Copy the generic table to drv then apply the overrides */ |
5368 | *drv = xhci_hc_driver; |
5369 | |
5370 | if (over) { |
5371 | drv->hcd_priv_size += over->extra_priv_size; |
5372 | if (over->reset) |
5373 | drv->reset = over->reset; |
5374 | if (over->start) |
5375 | drv->start = over->start; |
5376 | if (over->add_endpoint) |
5377 | drv->add_endpoint = over->add_endpoint; |
5378 | if (over->drop_endpoint) |
5379 | drv->drop_endpoint = over->drop_endpoint; |
5380 | if (over->check_bandwidth) |
5381 | drv->check_bandwidth = over->check_bandwidth; |
5382 | if (over->reset_bandwidth) |
5383 | drv->reset_bandwidth = over->reset_bandwidth; |
5384 | if (over->update_hub_device) |
5385 | drv->update_hub_device = over->update_hub_device; |
5386 | if (over->hub_control) |
5387 | drv->hub_control = over->hub_control; |
5388 | } |
5389 | } |
5390 | EXPORT_SYMBOL_GPL(xhci_init_driver); |
5391 | |
5392 | MODULE_DESCRIPTION(DRIVER_DESC); |
5393 | MODULE_AUTHOR(DRIVER_AUTHOR); |
5394 | MODULE_LICENSE("GPL" ); |
5395 | |
5396 | static int __init xhci_hcd_init(void) |
5397 | { |
5398 | /* |
5399 | * Check the compiler generated sizes of structures that must be laid |
5400 | * out in specific ways for hardware access. |
5401 | */ |
5402 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); |
5403 | BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); |
5404 | BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); |
5405 | /* xhci_device_control has eight fields, and also |
5406 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx |
5407 | */ |
5408 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); |
5409 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); |
5410 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); |
5411 | BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8); |
5412 | BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); |
5413 | /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ |
5414 | BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); |
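
	/*
	 * The size expressions read as <number of 32-bit fields> * 32 bits /
	 * 8 bits per byte; e.g. the slot context is eight 32-bit fields,
	 * i.e. 32 bytes, and the run registers are 8 fields plus 128
	 * interrupter register sets of 8 fields each.
	 */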
5415 | |
5416 | if (usb_disabled()) |
5417 | return -ENODEV; |
5418 | |
5419 | xhci_debugfs_create_root(); |
5420 | xhci_dbc_init(); |
5421 | |
5422 | return 0; |
5423 | } |
5424 | |
5425 | /* |
5426 | * If an init function is provided, an exit function must also be provided |
5427 | * to allow module unload. |
5428 | */ |
5429 | static void __exit xhci_hcd_fini(void) |
5430 | { |
5431 | xhci_debugfs_remove_root(); |
5432 | xhci_dbc_exit(); |
5433 | } |
5434 | |
5435 | module_init(xhci_hcd_init); |
5436 | module_exit(xhci_hcd_fini); |
5437 | |