1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Thunderbolt driver - bus logic (NHI independent) |
4 | * |
5 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> |
6 | * Copyright (C) 2019, Intel Corporation |
7 | */ |
8 | |
9 | #include <linux/slab.h> |
10 | #include <linux/errno.h> |
11 | #include <linux/delay.h> |
12 | #include <linux/pm_runtime.h> |
13 | #include <linux/platform_data/x86/apple.h> |
14 | |
15 | #include "tb.h" |
16 | #include "tb_regs.h" |
17 | #include "tunnel.h" |
18 | |
19 | #define TB_TIMEOUT 100 /* ms */ |
20 | |
21 | /* |
22 | * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver |
23 | * direction. This is 40G - 10% guard band bandwidth. |
24 | */ |
25 | #define TB_ASYM_MIN (40000 * 90 / 100) |
26 | |
27 | /* |
28 | * Threshold bandwidth (in Mb/s) that is used to switch the links to |
29 | * asymmetric and back. This is selected as 45G which means when the |
30 | * request is higher than this, we switch the link to asymmetric, and |
31 | * when it is less than this we switch it back. The 45G is selected so |
32 | * that we still have 27G (of the total 72G) for bulk PCIe traffic when |
33 | * switching back to symmetric. |
34 | */ |
35 | #define TB_ASYM_THRESHOLD 45000 |
36 | |
37 | #define MAX_GROUPS 7 /* max Group_ID is 7 */ |
38 | |
39 | static unsigned int asym_threshold = TB_ASYM_THRESHOLD; |
40 | module_param_named(asym_threshold, asym_threshold, uint, 0444); |
41 | MODULE_PARM_DESC(asym_threshold, |
42 | "threshold (Mb/s) when to Gen 4 switch link symmetry. 0 disables. (default: " |
43 | __MODULE_STRING(TB_ASYM_THRESHOLD) ")" ); |
44 | |
45 | /** |
46 | * struct tb_cm - Simple Thunderbolt connection manager |
47 | * @tunnel_list: List of active tunnels |
48 | * @dp_resources: List of available DP resources for DP tunneling |
49 | * @hotplug_active: tb_handle_hotplug will stop progressing plug |
50 | * events and exit if this is not set (it needs to |
51 | * acquire the lock one more time). Used to drain wq |
52 | * after cfg has been paused. |
53 | * @remove_work: Work used to remove any unplugged routers after |
54 | * runtime resume |
55 | * @groups: Bandwidth groups used in this domain. |
56 | */ |
57 | struct tb_cm { |
58 | struct list_head tunnel_list; |
59 | struct list_head dp_resources; |
60 | bool hotplug_active; |
61 | struct delayed_work remove_work; |
62 | struct tb_bandwidth_group groups[MAX_GROUPS]; |
63 | }; |
64 | |
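/*
 * The connection manager private data returned by tb_priv() is allocated
 * immediately after struct tb, so stepping back sizeof(struct tb) from the
 * private data gives the owning domain structure.
 */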
65 | static inline struct tb *tcm_to_tb(struct tb_cm *tcm) |
66 | { |
67 | return ((void *)tcm - sizeof(struct tb)); |
68 | } |
69 | |
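/**
 * struct tb_hotplug_event - Queued hotplug event
 * @work: Work item used to run the handler in the domain workqueue
 * @tb: Domain the event belongs to
 * @route: Route string of the router the event originated from
 * @port: Adapter number the event originated from
 * @unplug: True for an unplug event, false for a plug event
 */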
70 | struct tb_hotplug_event { |
71 | struct work_struct work; |
72 | struct tb *tb; |
73 | u64 route; |
74 | u8 port; |
75 | bool unplug; |
76 | }; |
77 | |
78 | static void tb_init_bandwidth_groups(struct tb_cm *tcm) |
79 | { |
80 | int i; |
81 | |
82 | for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { |
83 | struct tb_bandwidth_group *group = &tcm->groups[i]; |
84 | |
85 | group->tb = tcm_to_tb(tcm); |
86 | group->index = i + 1; |
87 | INIT_LIST_HEAD(&group->ports); |
88 | } |
89 | } |
90 | |
91 | static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group, |
92 | struct tb_port *in) |
93 | { |
94 | if (!group || WARN_ON(in->group)) |
95 | return; |
96 | |
97 | in->group = group; |
98 | list_add_tail(&in->group_list, &group->ports); |
99 | |
100 | tb_port_dbg(in, "attached to bandwidth group %d\n", group->index); |
101 | } |
102 | |
103 | static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm) |
104 | { |
105 | int i; |
106 | |
107 | for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { |
108 | struct tb_bandwidth_group *group = &tcm->groups[i]; |
109 | |
110 | if (list_empty(&group->ports)) |
111 | return group; |
112 | } |
113 | |
114 | return NULL; |
115 | } |
116 | |
117 | static struct tb_bandwidth_group * |
118 | tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in, |
119 | struct tb_port *out) |
120 | { |
121 | struct tb_bandwidth_group *group; |
122 | struct tb_tunnel *tunnel; |
123 | |
124 | /* |
125 | * Find all DP tunnels that go through all the same USB4 links |
126 | * as this one. Because we always setup tunnels the same way we |
127 | * can just check for the routers at both ends of the tunnels |
128 | * and if they are the same we have a match. |
129 | */ |
130 | list_for_each_entry(tunnel, &tcm->tunnel_list, list) { |
131 | if (!tb_tunnel_is_dp(tunnel)) |
132 | continue; |
133 | |
134 | if (tunnel->src_port->sw == in->sw && |
135 | tunnel->dst_port->sw == out->sw) { |
136 | group = tunnel->src_port->group; |
137 | if (group) { |
138 | tb_bandwidth_group_attach_port(group, in); |
139 | return group; |
140 | } |
141 | } |
142 | } |
143 | |
144 | /* Pick up next available group then */ |
145 | group = tb_find_free_bandwidth_group(tcm); |
146 | if (group) |
147 | tb_bandwidth_group_attach_port(group, in); |
148 | else |
149 | tb_port_warn(in, "no available bandwidth groups\n" ); |
150 | |
151 | return group; |
152 | } |
153 | |
154 | static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in, |
155 | struct tb_port *out) |
156 | { |
157 | if (usb4_dp_port_bandwidth_mode_enabled(in)) { |
158 | int index, i; |
159 | |
160 | index = usb4_dp_port_group_id(in); |
161 | for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { |
162 | if (tcm->groups[i].index == index) { |
163 | tb_bandwidth_group_attach_port(&tcm->groups[i], in); |
164 | return; |
165 | } |
166 | } |
167 | } |
168 | |
169 | tb_attach_bandwidth_group(tcm, in, out); |
170 | } |
171 | |
172 | static void tb_detach_bandwidth_group(struct tb_port *in) |
173 | { |
174 | struct tb_bandwidth_group *group = in->group; |
175 | |
176 | if (group) { |
177 | in->group = NULL; |
178 | list_del_init(&in->group_list); |
179 | |
180 | tb_port_dbg(in, "detached from bandwidth group %d\n", group->index); |
181 | } |
182 | } |
183 | |
184 | static void tb_handle_hotplug(struct work_struct *work); |
185 | |
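/*
 * Allocate and queue a hotplug event to be handled by tb_handle_hotplug()
 * in the domain workqueue. The event is silently dropped if the allocation
 * fails.
 */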
186 | static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) |
187 | { |
188 | struct tb_hotplug_event *ev; |
189 | |
190 | ev = kmalloc(sizeof(*ev), GFP_KERNEL); |
191 | if (!ev) |
192 | return; |
193 | |
194 | ev->tb = tb; |
195 | ev->route = route; |
196 | ev->port = port; |
197 | ev->unplug = unplug; |
198 | INIT_WORK(&ev->work, tb_handle_hotplug); |
199 | queue_work(tb->wq, &ev->work); |
200 | } |
201 | |
202 | /* enumeration & hot plug handling */ |
203 | |
204 | static void tb_add_dp_resources(struct tb_switch *sw) |
205 | { |
206 | struct tb_cm *tcm = tb_priv(sw->tb); |
207 | struct tb_port *port; |
208 | |
209 | tb_switch_for_each_port(sw, port) { |
210 | if (!tb_port_is_dpin(port)) |
211 | continue; |
212 | |
213 | if (!tb_switch_query_dp_resource(sw, port)) |
214 | continue; |
215 | |
216 | list_add(&port->list, &tcm->dp_resources); |
217 | tb_port_dbg(port, "DP IN resource available\n"); |
218 | } |
219 | } |
220 | |
221 | static void tb_remove_dp_resources(struct tb_switch *sw) |
222 | { |
223 | struct tb_cm *tcm = tb_priv(sw->tb); |
224 | struct tb_port *port, *tmp; |
225 | |
226 | /* Clear children resources first */ |
227 | tb_switch_for_each_port(sw, port) { |
228 | if (tb_port_has_remote(port)) |
229 | tb_remove_dp_resources(port->remote->sw); |
230 | } |
231 | |
232 | list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) { |
233 | if (port->sw == sw) { |
234 | tb_port_dbg(port, "DP OUT resource unavailable\n" ); |
235 | list_del_init(entry: &port->list); |
236 | } |
237 | } |
238 | } |
239 | |
240 | static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port) |
241 | { |
242 | struct tb_cm *tcm = tb_priv(tb); |
243 | struct tb_port *p; |
244 | |
245 | list_for_each_entry(p, &tcm->dp_resources, list) { |
246 | if (p == port) |
247 | return; |
248 | } |
249 | |
250 | tb_port_dbg(port, "DP %s resource available discovered\n" , |
251 | tb_port_is_dpin(port) ? "IN" : "OUT" ); |
252 | list_add_tail(new: &port->list, head: &tcm->dp_resources); |
253 | } |
254 | |
255 | static void tb_discover_dp_resources(struct tb *tb) |
256 | { |
257 | struct tb_cm *tcm = tb_priv(tb); |
258 | struct tb_tunnel *tunnel; |
259 | |
260 | list_for_each_entry(tunnel, &tcm->tunnel_list, list) { |
261 | if (tb_tunnel_is_dp(tunnel)) |
262 | tb_discover_dp_resource(tb, tunnel->dst_port); |
263 | } |
264 | } |
265 | |
266 | /* Enables CL states up to host router */ |
267 | static int tb_enable_clx(struct tb_switch *sw) |
268 | { |
269 | struct tb_cm *tcm = tb_priv(sw->tb); |
270 | unsigned int clx = TB_CL0S | TB_CL1; |
271 | const struct tb_tunnel *tunnel; |
272 | int ret; |
273 | |
274 | /* |
275 | * Currently only enable CLx for the first link. This is enough |
276 | * to allow the CPU to save energy at least on Intel hardware |
277 | * and makes it slightly simpler to implement. We may change |
278 | * this in the future to cover the whole topology if it turns |
279 | * out to be beneficial. |
280 | */ |
281 | while (sw && tb_switch_depth(sw) > 1) |
282 | sw = tb_switch_parent(sw); |
283 | |
284 | if (!sw) |
285 | return 0; |
286 | |
287 | if (tb_switch_depth(sw) != 1) |
288 | return 0; |
289 | |
290 | /* |
291 | * If we are re-enabling then check if there is an active DMA |
292 | * tunnel and in that case bail out. |
293 | */ |
294 | list_for_each_entry(tunnel, &tcm->tunnel_list, list) { |
295 | if (tb_tunnel_is_dma(tunnel)) { |
296 | if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw))) |
297 | return 0; |
298 | } |
299 | } |
300 | |
301 | /* |
302 | * Initially try with CL2. If that's not supported by the |
303 | * topology try with CL0s and CL1 and then give up. |
304 | */ |
305 | ret = tb_switch_clx_enable(sw, clx | TB_CL2); |
306 | if (ret == -EOPNOTSUPP) |
307 | ret = tb_switch_clx_enable(sw, clx); |
308 | return ret == -EOPNOTSUPP ? 0 : ret; |
309 | } |
310 | |
311 | /** |
312 | * tb_disable_clx() - Disable CL states up to host router |
313 | * @sw: Router to start |
314 | * |
315 | * Disables CL states from @sw up to the host router. Returns true if |
316 | * any CL states were disabled. This can be used to figure out whether |
317 | * the link was setup by us or the boot firmware so we don't |
318 | * accidentally enable them if they were not enabled during discovery. |
319 | */ |
320 | static bool tb_disable_clx(struct tb_switch *sw) |
321 | { |
322 | bool disabled = false; |
323 | |
324 | do { |
325 | int ret; |
326 | |
327 | ret = tb_switch_clx_disable(sw); |
328 | if (ret > 0) |
329 | disabled = true; |
330 | else if (ret < 0) |
331 | tb_sw_warn(sw, "failed to disable CL states\n" ); |
332 | |
333 | sw = tb_switch_parent(sw); |
334 | } while (sw); |
335 | |
336 | return disabled; |
337 | } |
338 | |
339 | static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data) |
340 | { |
341 | struct tb_switch *sw; |
342 | |
343 | sw = tb_to_switch(dev); |
344 | if (!sw) |
345 | return 0; |
346 | |
347 | if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) { |
348 | enum tb_switch_tmu_mode mode; |
349 | int ret; |
350 | |
351 | if (tb_switch_clx_is_enabled(sw, TB_CL1)) |
352 | mode = TB_SWITCH_TMU_MODE_HIFI_UNI; |
353 | else |
354 | mode = TB_SWITCH_TMU_MODE_HIFI_BI; |
355 | |
356 | ret = tb_switch_tmu_configure(sw, mode); |
357 | if (ret) |
358 | return ret; |
359 | |
360 | return tb_switch_tmu_enable(sw); |
361 | } |
362 | |
363 | return 0; |
364 | } |
365 | |
366 | static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel) |
367 | { |
368 | struct tb_switch *sw; |
369 | |
370 | if (!tunnel) |
371 | return; |
372 | |
373 | /* |
374 | * Once first DP tunnel is established we change the TMU |
375 | * accuracy of first depth child routers (and the host router) |
376 | * to the highest. This is needed for the DP tunneling to work |
377 | * but also allows CL0s. |
378 | * |
379 | * If both routers are v2 then we don't need to do anything as |
380 | * they are using enhanced TMU mode that allows all CLx. |
381 | */ |
382 | sw = tunnel->tb->root_switch; |
383 | device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy); |
384 | } |
385 | |
386 | static int tb_enable_tmu(struct tb_switch *sw) |
387 | { |
388 | int ret; |
389 | |
390 | /* |
391 | * If both routers at the end of the link are v2 we simply |
392 | * enable the enhanced uni-directional mode. That covers all |
393 | * the CL states. For v1 and before we need to use the normal |
394 | * rate to allow CL1 (when supported). Otherwise we keep the TMU |
395 | * running at the highest accuracy. |
396 | */ |
397 | ret = tb_switch_tmu_configure(sw, |
398 | TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI); |
399 | if (ret == -EOPNOTSUPP) { |
400 | if (tb_switch_clx_is_enabled(sw, TB_CL1)) |
401 | ret = tb_switch_tmu_configure(sw, |
402 | TB_SWITCH_TMU_MODE_LOWRES); |
403 | else |
404 | ret = tb_switch_tmu_configure(sw, |
405 | TB_SWITCH_TMU_MODE_HIFI_BI); |
406 | } |
407 | if (ret) |
408 | return ret; |
409 | |
410 | /* If it is already enabled in correct mode, don't touch it */ |
411 | if (tb_switch_tmu_is_enabled(sw)) |
412 | return 0; |
413 | |
414 | ret = tb_switch_tmu_disable(sw); |
415 | if (ret) |
416 | return ret; |
417 | |
418 | ret = tb_switch_tmu_post_time(sw); |
419 | if (ret) |
420 | return ret; |
421 | |
422 | return tb_switch_tmu_enable(sw); |
423 | } |
424 | |
425 | static void tb_switch_discover_tunnels(struct tb_switch *sw, |
426 | struct list_head *list, |
427 | bool alloc_hopids) |
428 | { |
429 | struct tb *tb = sw->tb; |
430 | struct tb_port *port; |
431 | |
432 | tb_switch_for_each_port(sw, port) { |
433 | struct tb_tunnel *tunnel = NULL; |
434 | |
435 | switch (port->config.type) { |
436 | case TB_TYPE_DP_HDMI_IN: |
437 | tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids); |
438 | tb_increase_tmu_accuracy(tunnel); |
439 | break; |
440 | |
441 | case TB_TYPE_PCIE_DOWN: |
442 | tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids); |
443 | break; |
444 | |
445 | case TB_TYPE_USB3_DOWN: |
446 | tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids); |
447 | break; |
448 | |
449 | default: |
450 | break; |
451 | } |
452 | |
453 | if (tunnel) |
454 | list_add_tail(&tunnel->list, list); |
455 | } |
456 | |
457 | tb_switch_for_each_port(sw, port) { |
458 | if (tb_port_has_remote(port)) { |
459 | tb_switch_discover_tunnels(port->remote->sw, list, |
460 | alloc_hopids); |
461 | } |
462 | } |
463 | } |
464 | |
465 | static void tb_discover_tunnels(struct tb *tb) |
466 | { |
467 | struct tb_cm *tcm = tb_priv(tb); |
468 | struct tb_tunnel *tunnel; |
469 | |
470 | tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true); |
471 | |
472 | list_for_each_entry(tunnel, &tcm->tunnel_list, list) { |
473 | if (tb_tunnel_is_pci(tunnel)) { |
474 | struct tb_switch *parent = tunnel->dst_port->sw; |
475 | |
476 | while (parent != tunnel->src_port->sw) { |
477 | parent->boot = true; |
478 | parent = tb_switch_parent(parent); |
479 | } |
480 | } else if (tb_tunnel_is_dp(tunnel)) { |
481 | struct tb_port *in = tunnel->src_port; |
482 | struct tb_port *out = tunnel->dst_port; |
483 | |
484 | /* Keep the domain from powering down */ |
485 | pm_runtime_get_sync(&in->sw->dev); |
486 | pm_runtime_get_sync(&out->sw->dev); |
487 | |
488 | tb_discover_bandwidth_group(tcm, in, out); |
489 | } |
490 | } |
491 | } |
492 | |
493 | static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd) |
494 | { |
495 | if (tb_switch_is_usb4(port->sw)) |
496 | return usb4_port_configure_xdomain(port, xd); |
497 | return tb_lc_configure_xdomain(port); |
498 | } |
499 | |
500 | static void tb_port_unconfigure_xdomain(struct tb_port *port) |
501 | { |
502 | if (tb_switch_is_usb4(port->sw)) |
503 | usb4_port_unconfigure_xdomain(port); |
504 | else |
505 | tb_lc_unconfigure_xdomain(port); |
506 | |
507 | tb_port_enable(port->dual_link_port); |
508 | } |
509 | |
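/*
 * Check whether another Thunderbolt domain (host) is connected behind
 * @port and, if so, create and add the XDomain device for it, unless one
 * already exists for the same route.
 */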
510 | static void tb_scan_xdomain(struct tb_port *port) |
511 | { |
512 | struct tb_switch *sw = port->sw; |
513 | struct tb *tb = sw->tb; |
514 | struct tb_xdomain *xd; |
515 | u64 route; |
516 | |
517 | if (!tb_is_xdomain_enabled()) |
518 | return; |
519 | |
520 | route = tb_downstream_route(port); |
521 | xd = tb_xdomain_find_by_route(tb, route); |
522 | if (xd) { |
523 | tb_xdomain_put(xd); |
524 | return; |
525 | } |
526 | |
527 | xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, |
528 | NULL); |
529 | if (xd) { |
530 | tb_port_at(route, sw)->xdomain = xd; |
531 | tb_port_configure_xdomain(port, xd); |
532 | tb_xdomain_add(xd); |
533 | } |
534 | } |
535 | |
536 | /** |
537 | * tb_find_unused_port() - return the first inactive port on @sw |
538 | * @sw: Switch to find the port on |
539 | * @type: Port type to look for |
540 | */ |
541 | static struct tb_port *tb_find_unused_port(struct tb_switch *sw, |
542 | enum tb_port_type type) |
543 | { |
544 | struct tb_port *port; |
545 | |
546 | tb_switch_for_each_port(sw, port) { |
547 | if (tb_is_upstream_port(port)) |
548 | continue; |
549 | if (port->config.type != type) |
550 | continue; |
551 | if (!port->cap_adap) |
552 | continue; |
553 | if (tb_port_is_enabled(port)) |
554 | continue; |
555 | return port; |
556 | } |
557 | return NULL; |
558 | } |
559 | |
560 | static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, |
561 | const struct tb_port *port) |
562 | { |
563 | struct tb_port *down; |
564 | |
565 | down = usb4_switch_map_usb3_down(sw, port); |
566 | if (down && !tb_usb3_port_is_enabled(down)) |
567 | return down; |
568 | return NULL; |
569 | } |
570 | |
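/*
 * Return the first active tunnel of @type whose source matches @src_port
 * or whose destination matches @dst_port (either may be NULL), or NULL if
 * no such tunnel exists.
 */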
571 | static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, |
572 | struct tb_port *src_port, |
573 | struct tb_port *dst_port) |
574 | { |
575 | struct tb_cm *tcm = tb_priv(tb); |
576 | struct tb_tunnel *tunnel; |
577 | |
578 | list_for_each_entry(tunnel, &tcm->tunnel_list, list) { |
579 | if (tunnel->type == type && |
580 | ((src_port && src_port == tunnel->src_port) || |
581 | (dst_port && dst_port == tunnel->dst_port))) { |
582 | return tunnel; |
583 | } |
584 | } |
585 | |
586 | return NULL; |
587 | } |
588 | |
589 | static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, |
590 | struct tb_port *src_port, |
591 | struct tb_port *dst_port) |
592 | { |
593 | struct tb_port *port, *usb3_down; |
594 | struct tb_switch *sw; |
595 | |
596 | /* Pick the router that is deepest in the topology */ |
597 | if (tb_port_path_direction_downstream(src_port, dst_port)) |
598 | sw = dst_port->sw; |
599 | else |
600 | sw = src_port->sw; |
601 | |
602 | /* Can't be the host router */ |
603 | if (sw == tb->root_switch) |
604 | return NULL; |
605 | |
606 | /* Find the downstream USB4 port that leads to this router */ |
607 | port = tb_port_at(tb_route(sw), tb->root_switch); |
608 | /* Find the corresponding host router USB3 downstream port */ |
609 | usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port); |
610 | if (!usb3_down) |
611 | return NULL; |
612 | |
613 | return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); |
614 | } |
615 | |
616 | /** |
617 | * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link |
618 | * @tb: Domain structure |
619 | * @src_port: Source protocol adapter |
620 | * @dst_port: Destination protocol adapter |
621 | * @port: USB4 port the consumed bandwidth is calculated |
622 | * @consumed_up: Consumed upstream bandwidth (Mb/s) |
623 | * @consumed_down: Consumed downstream bandwidth (Mb/s) |
624 | * |
625 | * Calculates consumed USB3 and PCIe bandwidth at @port on the path |
626 | * from @src_port to @dst_port. Does not take the tunnel starting from |
627 | * @src_port and ending at @dst_port into account. |
628 | */ |
629 | static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb, |
630 | struct tb_port *src_port, |
631 | struct tb_port *dst_port, |
632 | struct tb_port *port, |
633 | int *consumed_up, |
634 | int *consumed_down) |
635 | { |
636 | int pci_consumed_up, pci_consumed_down; |
637 | struct tb_tunnel *tunnel; |
638 | |
639 | *consumed_up = *consumed_down = 0; |
640 | |
641 | tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); |
642 | if (tunnel && tunnel->src_port != src_port && |
643 | tunnel->dst_port != dst_port) { |
644 | int ret; |
645 | |
646 | ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up, |
647 | consumed_down); |
648 | if (ret) |
649 | return ret; |
650 | } |
651 | |
652 | /* |
653 | * If there is anything reserved for PCIe bulk traffic take it |
654 | * into account here too. |
655 | */ |
656 | if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) { |
657 | *consumed_up += pci_consumed_up; |
658 | *consumed_down += pci_consumed_down; |
659 | } |
660 | |
661 | return 0; |
662 | } |
663 | |
664 | /** |
665 | * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link |
666 | * @tb: Domain structure |
667 | * @src_port: Source protocol adapter |
668 | * @dst_port: Destination protocol adapter |
669 | * @port: USB4 port the consumed bandwidth is calculated |
670 | * @consumed_up: Consumed upstream bandwidth (Mb/s) |
671 | * @consumed_down: Consumed downstream bandwidth (Mb/s) |
672 | * |
673 | * Calculates consumed DP bandwidth at @port on the path from @src_port |
674 | * to @dst_port. Does not take the tunnel starting from @src_port and |
675 | * ending at @dst_port into account. |
676 | */ |
677 | static int tb_consumed_dp_bandwidth(struct tb *tb, |
678 | struct tb_port *src_port, |
679 | struct tb_port *dst_port, |
680 | struct tb_port *port, |
681 | int *consumed_up, |
682 | int *consumed_down) |
683 | { |
684 | struct tb_cm *tcm = tb_priv(tb); |
685 | struct tb_tunnel *tunnel; |
686 | int ret; |
687 | |
688 | *consumed_up = *consumed_down = 0; |
689 | |
690 | /* |
691 | * Find all DP tunnels that cross the port and reduce |
692 | * their consumed bandwidth from the available. |
693 | */ |
694 | list_for_each_entry(tunnel, &tcm->tunnel_list, list) { |
695 | int dp_consumed_up, dp_consumed_down; |
696 | |
697 | if (tb_tunnel_is_invalid(tunnel)) |
698 | continue; |
699 | |
700 | if (!tb_tunnel_is_dp(tunnel)) |
701 | continue; |
702 | |
703 | if (!tb_tunnel_port_on_path(tunnel, port)) |
704 | continue; |
705 | |
706 | /* |
707 | * Ignore the DP tunnel between src_port and dst_port |
708 | * because it is the same tunnel and we may be |
709 | * re-calculating estimated bandwidth. |
710 | */ |
711 | if (tunnel->src_port == src_port && |
712 | tunnel->dst_port == dst_port) |
713 | continue; |
714 | |
715 | ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up, |
716 | &dp_consumed_down); |
717 | if (ret) |
718 | return ret; |
719 | |
720 | *consumed_up += dp_consumed_up; |
721 | *consumed_down += dp_consumed_down; |
722 | } |
723 | |
724 | return 0; |
725 | } |
726 | |
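/*
 * Return true if the lane adapter @port supports the asymmetric link
 * width that would be needed to favor the direction from @src_port
 * towards @dst_port.
 */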
727 | static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port, |
728 | struct tb_port *port) |
729 | { |
730 | bool downstream = tb_port_path_direction_downstream(src_port, dst_port); |
731 | enum tb_link_width width; |
732 | |
733 | if (tb_is_upstream_port(port)) |
734 | width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX; |
735 | else |
736 | width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX; |
737 | |
738 | return tb_port_width_supported(port, width); |
739 | } |
740 | |
741 | /** |
742 | * tb_maximum_bandwidth() - Maximum bandwidth over a single link |
743 | * @tb: Domain structure |
744 | * @src_port: Source protocol adapter |
745 | * @dst_port: Destination protocol adapter |
746 | * @port: USB4 port the total bandwidth is calculated |
747 | * @max_up: Maximum upstream bandwidth (Mb/s) |
748 | * @max_down: Maximum downstream bandwidth (Mb/s) |
749 | * @include_asym: Include bandwidth if the link is switched from |
750 | * symmetric to asymmetric |
751 | * |
752 | * Returns maximum possible bandwidth in @max_up and @max_down over a |
753 | * single link at @port. If @include_asym is set then includes the |
754 | * additional bandwidth if the links are transitioned into asymmetric |
755 | * in the direction from @src_port to @dst_port. |
756 | */ |
757 | static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port, |
758 | struct tb_port *dst_port, struct tb_port *port, |
759 | int *max_up, int *max_down, bool include_asym) |
760 | { |
761 | bool downstream = tb_port_path_direction_downstream(src_port, dst_port); |
762 | int link_speed, link_width, up_bw, down_bw; |
763 | |
764 | /* |
765 | * Can include asymmetric, only if it is actually supported by |
766 | * the lane adapter. |
767 | */ |
768 | if (!tb_asym_supported(src_port, dst_port, port)) |
769 | include_asym = false; |
770 | |
771 | if (tb_is_upstream_port(port)) { |
772 | link_speed = port->sw->link_speed; |
773 | /* |
774 | * sw->link_width is from upstream perspective so we use |
775 | * the opposite for downstream of the host router. |
776 | */ |
777 | if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { |
778 | up_bw = link_speed * 3 * 1000; |
779 | down_bw = link_speed * 1 * 1000; |
780 | } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { |
781 | up_bw = link_speed * 1 * 1000; |
782 | down_bw = link_speed * 3 * 1000; |
783 | } else if (include_asym) { |
784 | /* |
785 | * The link is symmetric at the moment but we |
786 | * can switch it to asymmetric as needed. Report |
787 | * this bandwidth as available (even though it |
788 | * is not yet enabled). |
789 | */ |
790 | if (downstream) { |
791 | up_bw = link_speed * 1 * 1000; |
792 | down_bw = link_speed * 3 * 1000; |
793 | } else { |
794 | up_bw = link_speed * 3 * 1000; |
795 | down_bw = link_speed * 1 * 1000; |
796 | } |
797 | } else { |
798 | up_bw = link_speed * port->sw->link_width * 1000; |
799 | down_bw = up_bw; |
800 | } |
801 | } else { |
802 | link_speed = tb_port_get_link_speed(port); |
803 | if (link_speed < 0) |
804 | return link_speed; |
805 | |
806 | link_width = tb_port_get_link_width(port); |
807 | if (link_width < 0) |
808 | return link_width; |
809 | |
810 | if (link_width == TB_LINK_WIDTH_ASYM_TX) { |
811 | up_bw = link_speed * 1 * 1000; |
812 | down_bw = link_speed * 3 * 1000; |
813 | } else if (link_width == TB_LINK_WIDTH_ASYM_RX) { |
814 | up_bw = link_speed * 3 * 1000; |
815 | down_bw = link_speed * 1 * 1000; |
816 | } else if (include_asym) { |
817 | /* |
818 | * The link is symmetric at the moment but we |
819 | * can switch it to asymmetric as needed. Report |
820 | * this bandwidth as available (even though it |
821 | * is not yet enabled). |
822 | */ |
823 | if (downstream) { |
824 | up_bw = link_speed * 1 * 1000; |
825 | down_bw = link_speed * 3 * 1000; |
826 | } else { |
827 | up_bw = link_speed * 3 * 1000; |
828 | down_bw = link_speed * 1 * 1000; |
829 | } |
830 | } else { |
831 | up_bw = link_speed * link_width * 1000; |
832 | down_bw = up_bw; |
833 | } |
834 | } |
835 | |
836 | /* Leave 10% guard band */ |
837 | *max_up = up_bw - up_bw / 10; |
838 | *max_down = down_bw - down_bw / 10; |
839 | |
840 | tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n" , *max_up, *max_down); |
841 | return 0; |
842 | } |
843 | |
844 | /** |
845 | * tb_available_bandwidth() - Available bandwidth for tunneling |
846 | * @tb: Domain structure |
847 | * @src_port: Source protocol adapter |
848 | * @dst_port: Destination protocol adapter |
849 | * @available_up: Available bandwidth upstream (Mb/s) |
850 | * @available_down: Available bandwidth downstream (Mb/s) |
851 | * @include_asym: Include bandwidth if the link is switched from |
852 | * symmetric to asymmetric |
853 | * |
854 | * Calculates maximum available bandwidth for protocol tunneling between |
855 | * @src_port and @dst_port at the moment. This is minimum of maximum |
856 | * link bandwidth across all links reduced by currently consumed |
857 | * bandwidth on that link. |
858 | * |
859 | * If @include_asym is true then includes also bandwidth that can be |
860 | * added when the links are transitioned into asymmetric (but does not |
861 | * transition the links). |
862 | */ |
863 | static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, |
864 | struct tb_port *dst_port, int *available_up, |
865 | int *available_down, bool include_asym) |
866 | { |
867 | struct tb_port *port; |
868 | int ret; |
869 | |
870 | /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */ |
871 | *available_up = *available_down = 120000; |
872 | |
873 | /* Find the minimum available bandwidth over all links */ |
874 | tb_for_each_port_on_path(src_port, dst_port, port) { |
875 | int max_up, max_down, consumed_up, consumed_down; |
876 | |
877 | if (!tb_port_is_null(port)) |
878 | continue; |
879 | |
880 | ret = tb_maximum_bandwidth(tb, src_port, dst_port, port, |
881 | &max_up, &max_down, include_asym); |
882 | if (ret) |
883 | return ret; |
884 | |
885 | ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port, |
886 | port, &consumed_up, |
887 | &consumed_down); |
888 | if (ret) |
889 | return ret; |
890 | max_up -= consumed_up; |
891 | max_down -= consumed_down; |
892 | |
893 | ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port, |
894 | &consumed_up, &consumed_down); |
895 | if (ret) |
896 | return ret; |
897 | max_up -= consumed_up; |
898 | max_down -= consumed_down; |
899 | |
900 | if (max_up < *available_up) |
901 | *available_up = max_up; |
902 | if (max_down < *available_down) |
903 | *available_down = max_down; |
904 | } |
905 | |
906 | if (*available_up < 0) |
907 | *available_up = 0; |
908 | if (*available_down < 0) |
909 | *available_down = 0; |
910 | |
911 | return 0; |
912 | } |
913 | |
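/*
 * Temporarily release bandwidth that the first hop USB3 tunnel on the
 * path has reserved but does not currently use, so it can be considered
 * for a new tunnel. tb_reclaim_usb3_bandwidth() hands it back afterwards.
 */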
914 | static int tb_release_unused_usb3_bandwidth(struct tb *tb, |
915 | struct tb_port *src_port, |
916 | struct tb_port *dst_port) |
917 | { |
918 | struct tb_tunnel *tunnel; |
919 | |
920 | tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); |
921 | return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0; |
922 | } |
923 | |
924 | static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, |
925 | struct tb_port *dst_port) |
926 | { |
927 | int ret, available_up, available_down; |
928 | struct tb_tunnel *tunnel; |
929 | |
930 | tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); |
931 | if (!tunnel) |
932 | return; |
933 | |
934 | tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n" ); |
935 | |
936 | /* |
937 | * Calculate available bandwidth for the first hop USB3 tunnel. |
938 | * That determines the whole USB3 bandwidth for this branch. |
939 | */ |
940 | ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, |
941 | &available_up, &available_down, false); |
942 | if (ret) { |
943 | tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n" ); |
944 | return; |
945 | } |
946 | |
947 | tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n" , available_up, |
948 | available_down); |
949 | |
950 | tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down); |
951 | } |
952 | |
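/*
 * Create a USB3 tunnel between the USB3 upstream adapter of @sw and the
 * matching USB3 downstream adapter of its parent router, provided USB3
 * tunneling is allowed and bandwidth is available on the path.
 */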
953 | static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) |
954 | { |
955 | struct tb_switch *parent = tb_switch_parent(sw); |
956 | int ret, available_up, available_down; |
957 | struct tb_port *up, *down, *port; |
958 | struct tb_cm *tcm = tb_priv(tb); |
959 | struct tb_tunnel *tunnel; |
960 | |
961 | if (!tb_acpi_may_tunnel_usb3()) { |
962 | tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n" ); |
963 | return 0; |
964 | } |
965 | |
966 | up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); |
967 | if (!up) |
968 | return 0; |
969 | |
970 | if (!sw->link_usb4) |
971 | return 0; |
972 | |
973 | /* |
974 | * Look up available down port. Since we are chaining it should |
975 | * be found right above this switch. |
976 | */ |
977 | port = tb_switch_downstream_port(sw); |
978 | down = tb_find_usb3_down(parent, port); |
979 | if (!down) |
980 | return 0; |
981 | |
982 | if (tb_route(parent)) { |
983 | struct tb_port *parent_up; |
984 | /* |
985 | * Check first that the parent switch has its upstream USB3 |
986 | * port enabled. Otherwise the chain is not complete and |
987 | * there is no point setting up a new tunnel. |
988 | */ |
989 | parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP); |
990 | if (!parent_up || !tb_port_is_enabled(parent_up)) |
991 | return 0; |
992 | |
993 | /* Make all unused bandwidth available for the new tunnel */ |
994 | ret = tb_release_unused_usb3_bandwidth(tb, down, up); |
995 | if (ret) |
996 | return ret; |
997 | } |
998 | |
999 | ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down, |
1000 | false); |
1001 | if (ret) |
1002 | goto err_reclaim; |
1003 | |
1004 | tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n" , |
1005 | available_up, available_down); |
1006 | |
1007 | tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, |
1008 | available_down); |
1009 | if (!tunnel) { |
1010 | ret = -ENOMEM; |
1011 | goto err_reclaim; |
1012 | } |
1013 | |
1014 | if (tb_tunnel_activate(tunnel)) { |
1015 | tb_port_info(up, |
1016 | "USB3 tunnel activation failed, aborting\n" ); |
1017 | ret = -EIO; |
1018 | goto err_free; |
1019 | } |
1020 | |
1021 | list_add_tail(&tunnel->list, &tcm->tunnel_list); |
1022 | if (tb_route(parent)) |
1023 | tb_reclaim_usb3_bandwidth(tb, down, up); |
1024 | |
1025 | return 0; |
1026 | |
1027 | err_free: |
1028 | tb_tunnel_free(tunnel); |
1029 | err_reclaim: |
1030 | if (tb_route(parent)) |
1031 | tb_reclaim_usb3_bandwidth(tb, down, up); |
1032 | |
1033 | return ret; |
1034 | } |
1035 | |
1036 | static int tb_create_usb3_tunnels(struct tb_switch *sw) |
1037 | { |
1038 | struct tb_port *port; |
1039 | int ret; |
1040 | |
1041 | if (!tb_acpi_may_tunnel_usb3()) |
1042 | return 0; |
1043 | |
1044 | if (tb_route(sw)) { |
1045 | ret = tb_tunnel_usb3(sw->tb, sw); |
1046 | if (ret) |
1047 | return ret; |
1048 | } |
1049 | |
1050 | tb_switch_for_each_port(sw, port) { |
1051 | if (!tb_port_has_remote(port)) |
1052 | continue; |
1053 | ret = tb_create_usb3_tunnels(port->remote->sw); |
1054 | if (ret) |
1055 | return ret; |
1056 | } |
1057 | |
1058 | return 0; |
1059 | } |
1060 | |
1061 | /** |
1062 | * tb_configure_asym() - Transition links to asymmetric if needed |
1063 | * @tb: Domain structure |
1064 | * @src_port: Source adapter to start the transition |
1065 | * @dst_port: Destination adapter |
1066 | * @requested_up: Additional bandwidth (Mb/s) required upstream |
1067 | * @requested_down: Additional bandwidth (Mb/s) required downstream |
1068 | * |
1069 | * Transition links between @src_port and @dst_port into asymmetric, with |
1070 | * three lanes in the direction from @src_port towards @dst_port and one lane |
1071 | * in the opposite direction, if the bandwidth requirements |
1072 | * (requested + currently consumed) on that link exceed @asym_threshold. |
1073 | * |
1074 | * Must be called with available >= requested over all links. |
1075 | */ |
1076 | static int tb_configure_asym(struct tb *tb, struct tb_port *src_port, |
1077 | struct tb_port *dst_port, int requested_up, |
1078 | int requested_down) |
1079 | { |
1080 | struct tb_switch *sw; |
1081 | bool clx, downstream; |
1082 | struct tb_port *up; |
1083 | int ret = 0; |
1084 | |
1085 | if (!asym_threshold) |
1086 | return 0; |
1087 | |
1088 | /* Disable CL states before doing any transitions */ |
1089 | downstream = tb_port_path_direction_downstream(src_port, dst_port); |
1090 | /* Pick up router deepest in the hierarchy */ |
1091 | if (downstream) |
1092 | sw = dst_port->sw; |
1093 | else |
1094 | sw = src_port->sw; |
1095 | |
1096 | clx = tb_disable_clx(sw); |
1097 | |
1098 | tb_for_each_upstream_port_on_path(src_port, dst_port, up) { |
1099 | int consumed_up, consumed_down; |
1100 | enum tb_link_width width; |
1101 | |
1102 | ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up, |
1103 | &consumed_up, &consumed_down); |
1104 | if (ret) |
1105 | break; |
1106 | |
1107 | if (downstream) { |
1108 | /* |
1109 | * Downstream so make sure upstream is within the 36G |
1110 | * (40G - guard band 10%), and the requested is above |
1111 | * what the threshold is. |
1112 | */ |
1113 | if (consumed_up + requested_up >= TB_ASYM_MIN) { |
1114 | ret = -ENOBUFS; |
1115 | break; |
1116 | } |
1117 | /* Does consumed + requested exceed the threshold */ |
1118 | if (consumed_down + requested_down < asym_threshold) |
1119 | continue; |
1120 | |
1121 | width = TB_LINK_WIDTH_ASYM_RX; |
1122 | } else { |
1123 | /* Upstream, the opposite of above */ |
1124 | if (consumed_down + requested_down >= TB_ASYM_MIN) { |
1125 | ret = -ENOBUFS; |
1126 | break; |
1127 | } |
1128 | if (consumed_up + requested_up < asym_threshold) |
1129 | continue; |
1130 | |
1131 | width = TB_LINK_WIDTH_ASYM_TX; |
1132 | } |
1133 | |
1134 | if (up->sw->link_width == width) |
1135 | continue; |
1136 | |
1137 | if (!tb_port_width_supported(up, width)) |
1138 | continue; |
1139 | |
1140 | tb_sw_dbg(up->sw, "configuring asymmetric link\n" ); |
1141 | |
1142 | /* |
1143 | * Here requested + consumed > threshold so we need to |
1144 | * transition the link into asymmetric now. |
1145 | */ |
1146 | ret = tb_switch_set_link_width(up->sw, width); |
1147 | if (ret) { |
1148 | tb_sw_warn(up->sw, "failed to set link width\n"); |
1149 | break; |
1150 | } |
1151 | } |
1152 | |
1153 | /* Re-enable CL states if they were previously enabled */ |
1154 | if (clx) |
1155 | tb_enable_clx(sw); |
1156 | |
1157 | return ret; |
1158 | } |
1159 | |
1160 | /** |
1161 | * tb_configure_sym() - Transition links to symmetric if possible |
1162 | * @tb: Domain structure |
1163 | * @src_port: Source adapter to start the transition |
1164 | * @dst_port: Destination adapter |
1165 | * @requested_up: New lower bandwidth request upstream (Mb/s) |
1166 | * @requested_down: New lower bandwidth request downstream (Mb/s) |
1167 | * |
1168 | * Goes over each link from @src_port to @dst_port and tries to |
1169 | * transition the link to symmetric if the currently consumed bandwidth |
1170 | * allows. |
1171 | */ |
1172 | static int tb_configure_sym(struct tb *tb, struct tb_port *src_port, |
1173 | struct tb_port *dst_port, int requested_up, |
1174 | int requested_down) |
1175 | { |
1176 | struct tb_switch *sw; |
1177 | bool clx, downstream; |
1178 | struct tb_port *up; |
1179 | int ret = 0; |
1180 | |
1181 | if (!asym_threshold) |
1182 | return 0; |
1183 | |
1184 | /* Disable CL states before doing any transitions */ |
1185 | downstream = tb_port_path_direction_downstream(src_port, dst_port); |
1186 | /* Pick up router deepest in the hierarchy */ |
1187 | if (downstream) |
1188 | sw = dst_port->sw; |
1189 | else |
1190 | sw = src_port->sw; |
1191 | |
1192 | clx = tb_disable_clx(sw); |
1193 | |
1194 | tb_for_each_upstream_port_on_path(src_port, dst_port, up) { |
1195 | int consumed_up, consumed_down; |
1196 | |
1197 | /* Already symmetric */ |
1198 | if (up->sw->link_width <= TB_LINK_WIDTH_DUAL) |
1199 | continue; |
1200 | /* Unplugged, no need to switch */ |
1201 | if (up->sw->is_unplugged) |
1202 | continue; |
1203 | |
1204 | ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up, |
1205 | &consumed_up, &consumed_down); |
1206 | if (ret) |
1207 | break; |
1208 | |
1209 | if (downstream) { |
1210 | /* |
1211 | * Downstream so we want the consumed_down < threshold. |
1212 | * Upstream traffic should be less than 36G (40G |
1213 | * guard band 10%) as the link was configured asymmetric |
1214 | * already. |
1215 | */ |
1216 | if (consumed_down + requested_down >= asym_threshold) |
1217 | continue; |
1218 | } else { |
1219 | if (consumed_up + requested_up >= asym_threshold) |
1220 | continue; |
1221 | } |
1222 | |
1223 | if (up->sw->link_width == TB_LINK_WIDTH_DUAL) |
1224 | continue; |
1225 | |
1226 | tb_sw_dbg(up->sw, "configuring symmetric link\n" ); |
1227 | |
1228 | ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL); |
1229 | if (ret) { |
1230 | tb_sw_warn(up->sw, "failed to set link width\n"); |
1231 | break; |
1232 | } |
1233 | } |
1234 | |
1235 | /* Re-enable CL states if they were previously enabled */ |
1236 | if (clx) |
1237 | tb_enable_clx(sw); |
1238 | |
1239 | return ret; |
1240 | } |
1241 | |
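/*
 * Called once a new router @sw has been enumerated behind @down: link the
 * lane adapters on both ends, bond the lanes into a dual link when the
 * link came up as two single lanes, transition upstream Gen 4 links back
 * to symmetric if bandwidth allows, and mark the link as configured.
 */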
1242 | static void tb_configure_link(struct tb_port *down, struct tb_port *up, |
1243 | struct tb_switch *sw) |
1244 | { |
1245 | struct tb *tb = sw->tb; |
1246 | |
1247 | /* Link the routers using both links if available */ |
1248 | down->remote = up; |
1249 | up->remote = down; |
1250 | if (down->dual_link_port && up->dual_link_port) { |
1251 | down->dual_link_port->remote = up->dual_link_port; |
1252 | up->dual_link_port->remote = down->dual_link_port; |
1253 | } |
1254 | |
1255 | /* |
1256 | * Enable lane bonding if the link is currently two single lane |
1257 | * links. |
1258 | */ |
1259 | if (sw->link_width < TB_LINK_WIDTH_DUAL) |
1260 | tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL); |
1261 | |
1262 | /* |
1263 | * If a device router connected deeper in the hierarchy comes |
1264 | * up with a symmetric link, we transition the links above it |
1265 | * back to symmetric if bandwidth allows. |
1266 | */ |
1267 | if (tb_switch_depth(sw) > 1 && |
1268 | tb_port_get_link_generation(up) >= 4 && |
1269 | up->sw->link_width == TB_LINK_WIDTH_DUAL) { |
1270 | struct tb_port *host_port; |
1271 | |
1272 | host_port = tb_port_at(tb_route(sw), tb->root_switch); |
1273 | tb_configure_sym(tb, host_port, up, 0, 0); |
1274 | } |
1275 | |
1276 | /* Set the link configured */ |
1277 | tb_switch_configure_link(sw); |
1278 | } |
1279 | |
1280 | static void tb_scan_port(struct tb_port *port); |
1281 | |
1282 | /* |
1283 | * tb_scan_switch() - scan for and initialize downstream switches |
1284 | */ |
1285 | static void tb_scan_switch(struct tb_switch *sw) |
1286 | { |
1287 | struct tb_port *port; |
1288 | |
1289 | pm_runtime_get_sync(&sw->dev); |
1290 | |
1291 | tb_switch_for_each_port(sw, port) |
1292 | tb_scan_port(port); |
1293 | |
1294 | pm_runtime_mark_last_busy(&sw->dev); |
1295 | pm_runtime_put_autosuspend(&sw->dev); |
1296 | } |
1297 | |
1298 | /* |
1299 | * tb_scan_port() - check for and initialize switches below port |
1300 | */ |
1301 | static void tb_scan_port(struct tb_port *port) |
1302 | { |
1303 | struct tb_cm *tcm = tb_priv(port->sw->tb); |
1304 | struct tb_port *upstream_port; |
1305 | bool discovery = false; |
1306 | struct tb_switch *sw; |
1307 | |
1308 | if (tb_is_upstream_port(port)) |
1309 | return; |
1310 | |
1311 | if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 && |
1312 | !tb_dp_port_is_enabled(port)) { |
1313 | tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n" ); |
1314 | tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, |
1315 | false); |
1316 | return; |
1317 | } |
1318 | |
1319 | if (port->config.type != TB_TYPE_PORT) |
1320 | return; |
1321 | if (port->dual_link_port && port->link_nr) |
1322 | return; /* |
1323 | * Downstream switch is reachable through two ports. |
1324 | * Only scan on the primary port (link_nr == 0). |
1325 | */ |
1326 | |
1327 | if (port->usb4) |
1328 | pm_runtime_get_sync(&port->usb4->dev); |
1329 | |
1330 | if (tb_wait_for_port(port, false) <= 0) |
1331 | goto out_rpm_put; |
1332 | if (port->remote) { |
1333 | tb_port_dbg(port, "port already has a remote\n" ); |
1334 | goto out_rpm_put; |
1335 | } |
1336 | |
1337 | tb_retimer_scan(port, true); |
1338 | |
1339 | sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, |
1340 | tb_downstream_route(port)); |
1341 | if (IS_ERR(sw)) { |
1342 | /* |
1343 | * If there is an error accessing the connected switch |
1344 | * it may be connected to another domain. Also we allow |
1345 | * the other domain to be connected to a max depth switch. |
1346 | */ |
1347 | if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) |
1348 | tb_scan_xdomain(port); |
1349 | goto out_rpm_put; |
1350 | } |
1351 | |
1352 | if (tb_switch_configure(sw)) { |
1353 | tb_switch_put(sw); |
1354 | goto out_rpm_put; |
1355 | } |
1356 | |
1357 | /* |
1358 | * If there was previously another domain connected remove it |
1359 | * first. |
1360 | */ |
1361 | if (port->xdomain) { |
1362 | tb_xdomain_remove(port->xdomain); |
1363 | tb_port_unconfigure_xdomain(port); |
1364 | port->xdomain = NULL; |
1365 | } |
1366 | |
1367 | /* |
1368 | * Do not send uevents until we have discovered all existing |
1369 | * tunnels and know which switches were authorized already by |
1370 | * the boot firmware. |
1371 | */ |
1372 | if (!tcm->hotplug_active) { |
1373 | dev_set_uevent_suppress(&sw->dev, true); |
1374 | discovery = true; |
1375 | } |
1376 | |
1377 | /* |
1378 | * At the moment we support runtime PM only for Thunderbolt 2 |
1379 | * and beyond (devices with LC). |
1380 | */ |
1381 | sw->rpm = sw->generation > 1; |
1382 | |
1383 | if (tb_switch_add(sw)) { |
1384 | tb_switch_put(sw); |
1385 | goto out_rpm_put; |
1386 | } |
1387 | |
1388 | upstream_port = tb_upstream_port(sw); |
1389 | tb_configure_link(port, upstream_port, sw); |
1390 | |
1391 | /* |
1392 | * CL0s and CL1 are enabled and supported together. |
1393 | * Silently ignore CLx enabling in case CLx is not supported. |
1394 | */ |
1395 | if (discovery) |
1396 | tb_sw_dbg(sw, "discovery, not touching CL states\n" ); |
1397 | else if (tb_enable_clx(sw)) |
1398 | tb_sw_warn(sw, "failed to enable CL states\n" ); |
1399 | |
1400 | if (tb_enable_tmu(sw)) |
1401 | tb_sw_warn(sw, "failed to enable TMU\n" ); |
1402 | |
1403 | /* |
1404 | * Configuration valid needs to be set after the TMU has been |
1405 | * enabled for the upstream port of the router so we do it here. |
1406 | */ |
1407 | tb_switch_configuration_valid(sw); |
1408 | |
1409 | /* Scan upstream retimers */ |
1410 | tb_retimer_scan(upstream_port, true); |
1411 | |
1412 | /* |
1413 | * Create USB 3.x tunnels only when the switch is plugged to the |
1414 | * domain. This is because we scan the domain also during discovery |
1415 | * and want to discover existing USB 3.x tunnels before we create |
1416 | * any new. |
1417 | */ |
1418 | if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) |
1419 | tb_sw_warn(sw, "USB3 tunnel creation failed\n" ); |
1420 | |
1421 | tb_add_dp_resources(sw); |
1422 | tb_scan_switch(sw); |
1423 | |
1424 | out_rpm_put: |
1425 | if (port->usb4) { |
1426 | pm_runtime_mark_last_busy(&port->usb4->dev); |
1427 | pm_runtime_put_autosuspend(&port->usb4->dev); |
1428 | } |
1429 | } |
1430 | |
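/*
 * Deactivate @tunnel, remove it from the tunnel list and release the
 * resources it was holding (DP IN resource, bandwidth, runtime PM
 * references) before freeing it.
 */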
1431 | static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) |
1432 | { |
1433 | struct tb_port *src_port, *dst_port; |
1434 | struct tb *tb; |
1435 | |
1436 | if (!tunnel) |
1437 | return; |
1438 | |
1439 | tb_tunnel_deactivate(tunnel); |
1440 | list_del(&tunnel->list); |
1441 | |
1442 | tb = tunnel->tb; |
1443 | src_port = tunnel->src_port; |
1444 | dst_port = tunnel->dst_port; |
1445 | |
1446 | switch (tunnel->type) { |
1447 | case TB_TUNNEL_DP: |
1448 | tb_detach_bandwidth_group(src_port); |
1449 | /* |
1450 | * In case of DP tunnel make sure the DP IN resource is |
1451 | * deallocated properly. |
1452 | */ |
1453 | tb_switch_dealloc_dp_resource(src_port->sw, src_port); |
1454 | /* |
1455 | * If bandwidth on a link is < asym_threshold |
1456 | * transition the link to symmetric. |
1457 | */ |
1458 | tb_configure_sym(tb, src_port, dst_port, 0, 0); |
1459 | /* Now we can allow the domain to runtime suspend again */ |
1460 | pm_runtime_mark_last_busy(&dst_port->sw->dev); |
1461 | pm_runtime_put_autosuspend(&dst_port->sw->dev); |
1462 | pm_runtime_mark_last_busy(&src_port->sw->dev); |
1463 | pm_runtime_put_autosuspend(&src_port->sw->dev); |
1464 | fallthrough; |
1465 | |
1466 | case TB_TUNNEL_USB3: |
1467 | tb_reclaim_usb3_bandwidth(tb, src_port, dst_port); |
1468 | break; |
1469 | |
1470 | default: |
1471 | /* |
1472 | * PCIe and DMA tunnels do not consume guaranteed |
1473 | * bandwidth. |
1474 | */ |
1475 | break; |
1476 | } |
1477 | |
1478 | tb_tunnel_free(tunnel); |
1479 | } |
1480 | |
1481 | /* |
1482 | * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away |
1483 | */ |
1484 | static void tb_free_invalid_tunnels(struct tb *tb) |
1485 | { |
1486 | struct tb_cm *tcm = tb_priv(tb); |
1487 | struct tb_tunnel *tunnel; |
1488 | struct tb_tunnel *n; |
1489 | |
1490 | list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { |
1491 | if (tb_tunnel_is_invalid(tunnel)) |
1492 | tb_deactivate_and_free_tunnel(tunnel); |
1493 | } |
1494 | } |
1495 | |
1496 | /* |
1497 | * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches |
1498 | */ |
1499 | static void tb_free_unplugged_children(struct tb_switch *sw) |
1500 | { |
1501 | struct tb_port *port; |
1502 | |
1503 | tb_switch_for_each_port(sw, port) { |
1504 | if (!tb_port_has_remote(port)) |
1505 | continue; |
1506 | |
1507 | if (port->remote->sw->is_unplugged) { |
1508 | tb_retimer_remove_all(port); |
1509 | tb_remove_dp_resources(port->remote->sw); |
1510 | tb_switch_unconfigure_link(port->remote->sw); |
1511 | tb_switch_set_link_width(port->remote->sw, |
1512 | TB_LINK_WIDTH_SINGLE); |
1513 | tb_switch_remove(port->remote->sw); |
1514 | port->remote = NULL; |
1515 | if (port->dual_link_port) |
1516 | port->dual_link_port->remote = NULL; |
1517 | } else { |
1518 | tb_free_unplugged_children(port->remote->sw); |
1519 | } |
1520 | } |
1521 | } |
1522 | |
1523 | static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, |
1524 | const struct tb_port *port) |
1525 | { |
1526 | struct tb_port *down = NULL; |
1527 | |
1528 | /* |
1529 | * To keep plugging devices consistently in the same PCIe |
1530 | * hierarchy, do mapping here for switch downstream PCIe ports. |
1531 | */ |
1532 | if (tb_switch_is_usb4(sw)) { |
1533 | down = usb4_switch_map_pcie_down(sw, port); |
1534 | } else if (!tb_route(sw)) { |
1535 | int phy_port = tb_phy_port_from_link(port->port); |
1536 | int index; |
1537 | |
1538 | /* |
1539 | * Hard-coded Thunderbolt port to PCIe down port mapping |
1540 | * per controller. |
1541 | */ |
1542 | if (tb_switch_is_cactus_ridge(sw) || |
1543 | tb_switch_is_alpine_ridge(sw)) |
1544 | index = !phy_port ? 6 : 7; |
1545 | else if (tb_switch_is_falcon_ridge(sw)) |
1546 | index = !phy_port ? 6 : 8; |
1547 | else if (tb_switch_is_titan_ridge(sw)) |
1548 | index = !phy_port ? 8 : 9; |
1549 | else |
1550 | goto out; |
1551 | |
1552 | /* Validate the hard-coding */ |
1553 | if (WARN_ON(index > sw->config.max_port_number)) |
1554 | goto out; |
1555 | |
1556 | down = &sw->ports[index]; |
1557 | } |
1558 | |
1559 | if (down) { |
1560 | if (WARN_ON(!tb_port_is_pcie_down(down))) |
1561 | goto out; |
1562 | if (tb_pci_port_is_enabled(down)) |
1563 | goto out; |
1564 | |
1565 | return down; |
1566 | } |
1567 | |
1568 | out: |
1569 | return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); |
1570 | } |
1571 | |
1572 | static void |
1573 | tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group) |
1574 | { |
1575 | struct tb_tunnel *first_tunnel; |
1576 | struct tb *tb = group->tb; |
1577 | struct tb_port *in; |
1578 | int ret; |
1579 | |
1580 | tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n" , |
1581 | group->index); |
1582 | |
1583 | first_tunnel = NULL; |
1584 | list_for_each_entry(in, &group->ports, group_list) { |
1585 | int estimated_bw, estimated_up, estimated_down; |
1586 | struct tb_tunnel *tunnel; |
1587 | struct tb_port *out; |
1588 | |
1589 | if (!usb4_dp_port_bandwidth_mode_enabled(in)) |
1590 | continue; |
1591 | |
1592 | tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); |
1593 | if (WARN_ON(!tunnel)) |
1594 | break; |
1595 | |
1596 | if (!first_tunnel) { |
1597 | /* |
1598 | * Since USB3 bandwidth is shared by all DP |
1599 | * tunnels under the host router USB4 port, even |
1600 | * if they do not begin from the host router, we |
1601 | * can release USB3 bandwidth just once and not |
1602 | * for each tunnel separately. |
1603 | */ |
1604 | first_tunnel = tunnel; |
1605 | ret = tb_release_unused_usb3_bandwidth(tb, |
1606 | first_tunnel->src_port, first_tunnel->dst_port); |
1607 | if (ret) { |
1608 | tb_tunnel_warn(tunnel, |
1609 | "failed to release unused bandwidth\n" ); |
1610 | break; |
1611 | } |
1612 | } |
1613 | |
1614 | out = tunnel->dst_port; |
1615 | ret = tb_available_bandwidth(tb, in, out, &estimated_up, |
1616 | &estimated_down, true); |
1617 | if (ret) { |
1618 | tb_tunnel_warn(tunnel, |
1619 | "failed to re-calculate estimated bandwidth\n" ); |
1620 | break; |
1621 | } |
1622 | |
1623 | /* |
1624 | * Estimated bandwidth includes: |
1625 | * - already allocated bandwidth for the DP tunnel |
1626 | * - available bandwidth along the path |
1627 | * - bandwidth allocated for USB 3.x but not used. |
1628 | */ |
1629 | tb_tunnel_dbg(tunnel, |
1630 | "re-calculated estimated bandwidth %u/%u Mb/s\n" , |
1631 | estimated_up, estimated_down); |
1632 | |
1633 | if (tb_port_path_direction_downstream(in, out)) |
1634 | estimated_bw = estimated_down; |
1635 | else |
1636 | estimated_bw = estimated_up; |
1637 | |
1638 | if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw)) |
1639 | tb_tunnel_warn(tunnel, |
1640 | "failed to update estimated bandwidth\n" ); |
1641 | } |
1642 | |
1643 | if (first_tunnel) |
1644 | tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port, |
1645 | first_tunnel->dst_port); |
1646 | |
1647 | tb_dbg(tb, "bandwidth estimation for group %u done\n" , group->index); |
1648 | } |
1649 | |
1650 | static void tb_recalc_estimated_bandwidth(struct tb *tb) |
1651 | { |
1652 | struct tb_cm *tcm = tb_priv(tb); |
1653 | int i; |
1654 | |
1655 | tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n" ); |
1656 | |
1657 | for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { |
1658 | struct tb_bandwidth_group *group = &tcm->groups[i]; |
1659 | |
1660 | if (!list_empty(&group->ports)) |
1661 | tb_recalc_estimated_bandwidth_for_group(group); |
1662 | } |
1663 | |
1664 | tb_dbg(tb, "bandwidth re-calculation done\n" ); |
1665 | } |
1666 | |
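/*
 * Find an inactive DP OUT adapter that can be paired with @in. Only
 * adapters reachable through the same host router downstream port as @in
 * are considered, so the tunnel stays within one branch of the topology.
 */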
1667 | static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) |
1668 | { |
1669 | struct tb_port *host_port, *port; |
1670 | struct tb_cm *tcm = tb_priv(tb); |
1671 | |
1672 | host_port = tb_route(in->sw) ? |
1673 | tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; |
1674 | |
1675 | list_for_each_entry(port, &tcm->dp_resources, list) { |
1676 | if (!tb_port_is_dpout(port)) |
1677 | continue; |
1678 | |
1679 | if (tb_port_is_enabled(port)) { |
1680 | tb_port_dbg(port, "DP OUT in use\n" ); |
1681 | continue; |
1682 | } |
1683 | |
1684 | tb_port_dbg(port, "DP OUT available\n" ); |
1685 | |
1686 | /* |
1687 | * Keep the DP tunnel under the topology starting from |
1688 | * the same host router downstream port. |
1689 | */ |
1690 | if (host_port && tb_route(port->sw)) { |
1691 | struct tb_port *p; |
1692 | |
1693 | p = tb_port_at(tb_route(port->sw), tb->root_switch); |
1694 | if (p != host_port) |
1695 | continue; |
1696 | } |
1697 | |
1698 | return port; |
1699 | } |
1700 | |
1701 | return NULL; |
1702 | } |
1703 | |
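/*
 * Pick one inactive DP IN / DP OUT adapter pair and try to establish a DP
 * tunnel between them. Returns true if a tunnel was established, false if
 * no suitable pair was found or the tunnel could not be set up.
 */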
1704 | static bool tb_tunnel_one_dp(struct tb *tb) |
1705 | { |
1706 | int available_up, available_down, ret, link_nr; |
1707 | struct tb_cm *tcm = tb_priv(tb); |
1708 | struct tb_port *port, *in, *out; |
1709 | int consumed_up, consumed_down; |
1710 | struct tb_tunnel *tunnel; |
1711 | |
1712 | /* |
1713 | * Find pair of inactive DP IN and DP OUT adapters and then |
1714 | * establish a DP tunnel between them. |
1715 | */ |
1716 | tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n" ); |
1717 | |
1718 | in = NULL; |
1719 | out = NULL; |
1720 | list_for_each_entry(port, &tcm->dp_resources, list) { |
1721 | if (!tb_port_is_dpin(port)) |
1722 | continue; |
1723 | |
1724 | if (tb_port_is_enabled(port)) { |
1725 | tb_port_dbg(port, "DP IN in use\n" ); |
1726 | continue; |
1727 | } |
1728 | |
1729 | in = port; |
1730 | tb_port_dbg(in, "DP IN available\n" ); |
1731 | |
out = tb_find_dp_out(tb, port);
1733 | if (out) |
1734 | break; |
1735 | } |
1736 | |
1737 | if (!in) { |
1738 | tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n" ); |
1739 | return false; |
1740 | } |
1741 | if (!out) { |
1742 | tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n" ); |
1743 | return false; |
1744 | } |
1745 | |
1746 | /* |
1747 | * This is only applicable to links that are not bonded (so |
1748 | * when Thunderbolt 1 hardware is involved somewhere in the |
1749 | * topology). For these try to share the DP bandwidth between |
1750 | * the two lanes. |
1751 | */ |
1752 | link_nr = 1; |
1753 | list_for_each_entry(tunnel, &tcm->tunnel_list, list) { |
1754 | if (tb_tunnel_is_dp(tunnel)) { |
1755 | link_nr = 0; |
1756 | break; |
1757 | } |
1758 | } |
1759 | |
1760 | /* |
1761 | * DP stream needs the domain to be active so runtime resume |
1762 | * both ends of the tunnel. |
1763 | * |
1764 | * This should bring the routers in the middle active as well |
1765 | * and keeps the domain from runtime suspending while the DP |
1766 | * tunnel is active. |
1767 | */ |
pm_runtime_get_sync(&in->sw->dev);
pm_runtime_get_sync(&out->sw->dev);
1770 | |
if (tb_switch_alloc_dp_resource(in->sw, in)) {
1772 | tb_port_dbg(in, "no resource available for DP IN, not tunneling\n" ); |
1773 | goto err_rpm_put; |
1774 | } |
1775 | |
1776 | if (!tb_attach_bandwidth_group(tcm, in, out)) |
1777 | goto err_dealloc_dp; |
1778 | |
1779 | /* Make all unused USB3 bandwidth available for the new DP tunnel */ |
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1781 | if (ret) { |
1782 | tb_warn(tb, "failed to release unused bandwidth\n" ); |
1783 | goto err_detach_group; |
1784 | } |
1785 | |
ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
true);
1788 | if (ret) |
1789 | goto err_reclaim_usb; |
1790 | |
1791 | tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n" , |
1792 | available_up, available_down); |
1793 | |
tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
available_down);
1796 | if (!tunnel) { |
1797 | tb_port_dbg(out, "could not allocate DP tunnel\n" ); |
1798 | goto err_reclaim_usb; |
1799 | } |
1800 | |
1801 | if (tb_tunnel_activate(tunnel)) { |
1802 | tb_port_info(out, "DP tunnel activation failed, aborting\n" ); |
1803 | goto err_free; |
1804 | } |
1805 | |
list_add_tail(&tunnel->list, &tcm->tunnel_list);
tb_reclaim_usb3_bandwidth(tb, in, out);
1808 | |
1809 | /* |
1810 | * Transition the links to asymmetric if the consumption exceeds |
1811 | * the threshold. |
1812 | */ |
if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
tb_configure_asym(tb, in, out, consumed_up, consumed_down);
1815 | |
1816 | /* Update the domain with the new bandwidth estimation */ |
1817 | tb_recalc_estimated_bandwidth(tb); |
1818 | |
/*
* If a DP tunnel exists, change the TMU mode of the host router's
* first-level children to HiFi so that CL0s works.
*/
1823 | tb_increase_tmu_accuracy(tunnel); |
1824 | return true; |
1825 | |
1826 | err_free: |
1827 | tb_tunnel_free(tunnel); |
1828 | err_reclaim_usb: |
tb_reclaim_usb3_bandwidth(tb, in, out);
err_detach_group:
tb_detach_bandwidth_group(in);
err_dealloc_dp:
tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
pm_runtime_mark_last_busy(&out->sw->dev);
pm_runtime_put_autosuspend(&out->sw->dev);
pm_runtime_mark_last_busy(&in->sw->dev);
pm_runtime_put_autosuspend(&in->sw->dev);
1839 | |
1840 | return false; |
1841 | } |
1842 | |
1843 | static void tb_tunnel_dp(struct tb *tb) |
1844 | { |
1845 | if (!tb_acpi_may_tunnel_dp()) { |
1846 | tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n" ); |
1847 | return; |
1848 | } |
1849 | |
1850 | while (tb_tunnel_one_dp(tb)) |
1851 | ; |
1852 | } |
1853 | |
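/*
* A DP adapter became unavailable: tear down the tunnel using it (if
* any), drop the adapter from the DP resource list and then try to
* make use of the remaining resources.
*/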
1854 | static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) |
1855 | { |
1856 | struct tb_port *in, *out; |
1857 | struct tb_tunnel *tunnel; |
1858 | |
1859 | if (tb_port_is_dpin(port)) { |
1860 | tb_port_dbg(port, "DP IN resource unavailable\n" ); |
1861 | in = port; |
1862 | out = NULL; |
1863 | } else { |
1864 | tb_port_dbg(port, "DP OUT resource unavailable\n" ); |
1865 | in = NULL; |
1866 | out = port; |
1867 | } |
1868 | |
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
tb_deactivate_and_free_tunnel(tunnel);
list_del_init(&port->list);
1872 | |
/*
* See if there is another DP OUT port that can be used to
* create another tunnel.
*/
1877 | tb_recalc_estimated_bandwidth(tb); |
1878 | tb_tunnel_dp(tb); |
1879 | } |
1880 | |
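/*
* A DP adapter became available: add it to the DP resource list
* (unless it is already there) and see if a new DP tunnel can be
* created.
*/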
1881 | static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) |
1882 | { |
1883 | struct tb_cm *tcm = tb_priv(tb); |
1884 | struct tb_port *p; |
1885 | |
1886 | if (tb_port_is_enabled(port)) |
1887 | return; |
1888 | |
1889 | list_for_each_entry(p, &tcm->dp_resources, list) { |
1890 | if (p == port) |
1891 | return; |
1892 | } |
1893 | |
1894 | tb_port_dbg(port, "DP %s resource available\n" , |
1895 | tb_port_is_dpin(port) ? "IN" : "OUT" ); |
list_add_tail(&port->list, &tcm->dp_resources);
1897 | |
1898 | /* Look for suitable DP IN <-> DP OUT pairs now */ |
1899 | tb_tunnel_dp(tb); |
1900 | } |
1901 | |
1902 | static void tb_disconnect_and_release_dp(struct tb *tb) |
1903 | { |
1904 | struct tb_cm *tcm = tb_priv(tb); |
1905 | struct tb_tunnel *tunnel, *n; |
1906 | |
1907 | /* |
1908 | * Tear down all DP tunnels and release their resources. They |
1909 | * will be re-established after resume based on plug events. |
1910 | */ |
1911 | list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) { |
1912 | if (tb_tunnel_is_dp(tunnel)) |
1913 | tb_deactivate_and_free_tunnel(tunnel); |
1914 | } |
1915 | |
while (!list_empty(&tcm->dp_resources)) {
1917 | struct tb_port *port; |
1918 | |
1919 | port = list_first_entry(&tcm->dp_resources, |
1920 | struct tb_port, list); |
list_del_init(&port->list);
1922 | } |
1923 | } |
1924 | |
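/*
* Tear down the PCIe tunnel going to the PCIe upstream port of @sw.
* This implements the ->disapprove_switch() callback of tb_cm_ops.
*/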
1925 | static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) |
1926 | { |
1927 | struct tb_tunnel *tunnel; |
1928 | struct tb_port *up; |
1929 | |
up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1931 | if (WARN_ON(!up)) |
1932 | return -ENODEV; |
1933 | |
tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1935 | if (WARN_ON(!tunnel)) |
1936 | return -ENODEV; |
1937 | |
1938 | tb_switch_xhci_disconnect(sw); |
1939 | |
1940 | tb_tunnel_deactivate(tunnel); |
list_del(&tunnel->list);
1942 | tb_tunnel_free(tunnel); |
1943 | return 0; |
1944 | } |
1945 | |
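/*
* Create a PCIe tunnel from the PCIe downstream port of the parent
* router to the PCIe upstream port of @sw. This implements the
* ->approve_switch() callback of tb_cm_ops.
*/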
1946 | static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) |
1947 | { |
1948 | struct tb_port *up, *down, *port; |
1949 | struct tb_cm *tcm = tb_priv(tb); |
1950 | struct tb_tunnel *tunnel; |
1951 | |
up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1953 | if (!up) |
1954 | return 0; |
1955 | |
1956 | /* |
1957 | * Look up available down port. Since we are chaining it should |
1958 | * be found right above this switch. |
1959 | */ |
1960 | port = tb_switch_downstream_port(sw); |
down = tb_find_pcie_down(tb_switch_parent(sw), port);
1962 | if (!down) |
1963 | return 0; |
1964 | |
1965 | tunnel = tb_tunnel_alloc_pci(tb, up, down); |
1966 | if (!tunnel) |
1967 | return -ENOMEM; |
1968 | |
1969 | if (tb_tunnel_activate(tunnel)) { |
1970 | tb_port_info(up, |
1971 | "PCIe tunnel activation failed, aborting\n" ); |
1972 | tb_tunnel_free(tunnel); |
1973 | return -EIO; |
1974 | } |
1975 | |
1976 | /* |
1977 | * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it |
1978 | * here. |
1979 | */ |
1980 | if (tb_switch_pcie_l1_enable(sw)) |
1981 | tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n" ); |
1982 | |
1983 | if (tb_switch_xhci_connect(sw)) |
1984 | tb_sw_warn(sw, "failed to connect xHCI\n" ); |
1985 | |
list_add_tail(&tunnel->list, &tcm->tunnel_list);
1987 | return 0; |
1988 | } |
1989 | |
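/*
* Establish a DMA tunnel between the host NHI and the XDomain peer at
* @xd->route. CL states are kept disabled while the DMA paths are
* active.
*/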
1990 | static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, |
1991 | int transmit_path, int transmit_ring, |
1992 | int receive_path, int receive_ring) |
1993 | { |
1994 | struct tb_cm *tcm = tb_priv(tb); |
1995 | struct tb_port *nhi_port, *dst_port; |
1996 | struct tb_tunnel *tunnel; |
1997 | struct tb_switch *sw; |
1998 | int ret; |
1999 | |
sw = tb_to_switch(xd->dev.parent);
dst_port = tb_port_at(xd->route, sw);
nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2003 | |
2004 | mutex_lock(&tb->lock); |
2005 | |
2006 | /* |
2007 | * When tunneling DMA paths the link should not enter CL states |
2008 | * so disable them now. |
2009 | */ |
2010 | tb_disable_clx(sw); |
2011 | |
tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
transmit_ring, receive_path, receive_ring);
2014 | if (!tunnel) { |
2015 | ret = -ENOMEM; |
2016 | goto err_clx; |
2017 | } |
2018 | |
2019 | if (tb_tunnel_activate(tunnel)) { |
2020 | tb_port_info(nhi_port, |
2021 | "DMA tunnel activation failed, aborting\n" ); |
2022 | ret = -EIO; |
2023 | goto err_free; |
2024 | } |
2025 | |
list_add_tail(&tunnel->list, &tcm->tunnel_list);
mutex_unlock(&tb->lock);
2028 | return 0; |
2029 | |
2030 | err_free: |
2031 | tb_tunnel_free(tunnel); |
2032 | err_clx: |
2033 | tb_enable_clx(sw); |
mutex_unlock(&tb->lock);
2035 | |
2036 | return ret; |
2037 | } |
2038 | |
2039 | static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, |
2040 | int transmit_path, int transmit_ring, |
2041 | int receive_path, int receive_ring) |
2042 | { |
2043 | struct tb_cm *tcm = tb_priv(tb); |
2044 | struct tb_port *nhi_port, *dst_port; |
2045 | struct tb_tunnel *tunnel, *n; |
2046 | struct tb_switch *sw; |
2047 | |
sw = tb_to_switch(xd->dev.parent);
dst_port = tb_port_at(xd->route, sw);
nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2051 | |
2052 | list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { |
2053 | if (!tb_tunnel_is_dma(tunnel)) |
2054 | continue; |
2055 | if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port) |
2056 | continue; |
2057 | |
2058 | if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring, |
2059 | receive_path, receive_ring)) |
2060 | tb_deactivate_and_free_tunnel(tunnel); |
2061 | } |
2062 | |
2063 | /* |
2064 | * Try to re-enable CL states now, it is OK if this fails |
2065 | * because we may still have another DMA tunnel active through |
2066 | * the same host router USB4 downstream port. |
2067 | */ |
2068 | tb_enable_clx(sw); |
2069 | } |
2070 | |
2071 | static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, |
2072 | int transmit_path, int transmit_ring, |
2073 | int receive_path, int receive_ring) |
2074 | { |
2075 | if (!xd->is_unplugged) { |
2076 | mutex_lock(&tb->lock); |
2077 | __tb_disconnect_xdomain_paths(tb, xd, transmit_path, |
2078 | transmit_ring, receive_path, |
2079 | receive_ring); |
mutex_unlock(&tb->lock);
2081 | } |
2082 | return 0; |
2083 | } |
2084 | |
2085 | /* hotplug handling */ |
2086 | |
2087 | /* |
2088 | * tb_handle_hotplug() - handle hotplug event |
2089 | * |
2090 | * Executes on tb->wq. |
2091 | */ |
2092 | static void tb_handle_hotplug(struct work_struct *work) |
2093 | { |
2094 | struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); |
2095 | struct tb *tb = ev->tb; |
2096 | struct tb_cm *tcm = tb_priv(tb); |
2097 | struct tb_switch *sw; |
2098 | struct tb_port *port; |
2099 | |
2100 | /* Bring the domain back from sleep if it was suspended */ |
pm_runtime_get_sync(&tb->dev);
2102 | |
2103 | mutex_lock(&tb->lock); |
2104 | if (!tcm->hotplug_active) |
2105 | goto out; /* during init, suspend or shutdown */ |
2106 | |
sw = tb_switch_find_by_route(tb, ev->route);
if (!sw) {
tb_warn(tb,
"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
2111 | ev->route, ev->port, ev->unplug); |
2112 | goto out; |
2113 | } |
2114 | if (ev->port > sw->config.max_port_number) { |
2115 | tb_warn(tb, |
2116 | "hotplug event from non existent port %llx:%x (unplug: %d)\n" , |
2117 | ev->route, ev->port, ev->unplug); |
2118 | goto put_sw; |
2119 | } |
2120 | port = &sw->ports[ev->port]; |
2121 | if (tb_is_upstream_port(port)) { |
2122 | tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n" , |
2123 | ev->route, ev->port, ev->unplug); |
2124 | goto put_sw; |
2125 | } |
2126 | |
pm_runtime_get_sync(&sw->dev);
2128 | |
2129 | if (ev->unplug) { |
2130 | tb_retimer_remove_all(port); |
2131 | |
2132 | if (tb_port_has_remote(port)) { |
2133 | tb_port_dbg(port, "switch unplugged\n" ); |
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
tb_remove_dp_resources(port->remote->sw);
tb_switch_tmu_disable(port->remote->sw);
tb_switch_unconfigure_link(port->remote->sw);
tb_switch_set_link_width(port->remote->sw,
TB_LINK_WIDTH_SINGLE);
tb_switch_remove(port->remote->sw);
2142 | port->remote = NULL; |
2143 | if (port->dual_link_port) |
2144 | port->dual_link_port->remote = NULL; |
2145 | /* Maybe we can create another DP tunnel */ |
2146 | tb_recalc_estimated_bandwidth(tb); |
2147 | tb_tunnel_dp(tb); |
2148 | } else if (port->xdomain) { |
struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
2150 | |
2151 | tb_port_dbg(port, "xdomain unplugged\n" ); |
2152 | /* |
2153 | * Service drivers are unbound during |
2154 | * tb_xdomain_remove() so setting XDomain as |
2155 | * unplugged here prevents deadlock if they call |
2156 | * tb_xdomain_disable_paths(). We will tear down |
2157 | * all the tunnels below. |
2158 | */ |
2159 | xd->is_unplugged = true; |
2160 | tb_xdomain_remove(xd); |
2161 | port->xdomain = NULL; |
__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
2163 | tb_xdomain_put(xd); |
2164 | tb_port_unconfigure_xdomain(port); |
2165 | } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { |
2166 | tb_dp_resource_unavailable(tb, port); |
2167 | } else if (!port->port) { |
2168 | tb_sw_dbg(sw, "xHCI disconnect request\n" ); |
2169 | tb_switch_xhci_disconnect(sw); |
2170 | } else { |
2171 | tb_port_dbg(port, |
2172 | "got unplug event for disconnected port, ignoring\n" ); |
2173 | } |
2174 | } else if (port->remote) { |
2175 | tb_port_dbg(port, "got plug event for connected port, ignoring\n" ); |
2176 | } else if (!port->port && sw->authorized) { |
2177 | tb_sw_dbg(sw, "xHCI connect request\n" ); |
2178 | tb_switch_xhci_connect(sw); |
2179 | } else { |
2180 | if (tb_port_is_null(port)) { |
2181 | tb_port_dbg(port, "hotplug: scanning\n" ); |
2182 | tb_scan_port(port); |
2183 | if (!port->remote) |
2184 | tb_port_dbg(port, "hotplug: no switch found\n" ); |
2185 | } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { |
2186 | tb_dp_resource_available(tb, port); |
2187 | } |
2188 | } |
2189 | |
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
2192 | |
2193 | put_sw: |
2194 | tb_switch_put(sw); |
2195 | out: |
mutex_unlock(&tb->lock);

pm_runtime_mark_last_busy(&tb->dev);
pm_runtime_put_autosuspend(&tb->dev);

kfree(ev);
2202 | } |
2203 | |
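/*
* Handle a bandwidth allocation request coming from the DP IN adapter.
* If the request fits into what is already allocated, the reservation
* is simply adjusted (and the link possibly transitioned back to
* symmetric). Otherwise unused USB3 bandwidth is released first, the
* available bandwidth over the shared path is re-calculated and, if
* the request fits, the link may be transitioned to asymmetric before
* the new allocation is made. Returns %-ENOBUFS if the request cannot
* be satisfied.
*/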
2204 | static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, |
2205 | int *requested_down) |
2206 | { |
2207 | int allocated_up, allocated_down, available_up, available_down, ret; |
2208 | int requested_up_corrected, requested_down_corrected, granularity; |
2209 | int max_up, max_down, max_up_rounded, max_down_rounded; |
2210 | struct tb *tb = tunnel->tb; |
2211 | struct tb_port *in, *out; |
2212 | |
ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
2214 | if (ret) |
2215 | return ret; |
2216 | |
2217 | in = tunnel->src_port; |
2218 | out = tunnel->dst_port; |
2219 | |
2220 | tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n" , |
2221 | allocated_up, allocated_down); |
2222 | |
2223 | /* |
2224 | * If we get rounded up request from graphics side, say HBR2 x 4 |
2225 | * that is 17500 instead of 17280 (this is because of the |
2226 | * granularity), we allow it too. Here the graphics has already |
2227 | * negotiated with the DPRX the maximum possible rates (which is |
2228 | * 17280 in this case). |
2229 | * |
2230 | * Since the link cannot go higher than 17280 we use that in our |
2231 | * calculations but the DP IN adapter Allocated BW write must be |
2232 | * the same value (17500) otherwise the adapter will mark it as |
2233 | * failed for graphics. |
2234 | */ |
ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
2236 | if (ret) |
2237 | return ret; |
2238 | |
ret = usb4_dp_port_granularity(in);
2240 | if (ret < 0) |
2241 | return ret; |
2242 | granularity = ret; |
2243 | |
2244 | max_up_rounded = roundup(max_up, granularity); |
2245 | max_down_rounded = roundup(max_down, granularity); |
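/*
* Worked example (the granularity value here is illustrative only):
* with HBR2 x 4 the DPRX maximum is 17280 Mb/s; assuming a 250 Mb/s
* granularity this rounds up to 17500 Mb/s, matching the value the
* graphics side requests.
*/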
2246 | |
2247 | /* |
2248 | * This will "fix" the request down to the maximum supported |
2249 | * rate * lanes if it is at the maximum rounded up level. |
2250 | */ |
2251 | requested_up_corrected = *requested_up; |
2252 | if (requested_up_corrected == max_up_rounded) |
2253 | requested_up_corrected = max_up; |
2254 | else if (requested_up_corrected < 0) |
2255 | requested_up_corrected = 0; |
2256 | requested_down_corrected = *requested_down; |
2257 | if (requested_down_corrected == max_down_rounded) |
2258 | requested_down_corrected = max_down; |
2259 | else if (requested_down_corrected < 0) |
2260 | requested_down_corrected = 0; |
2261 | |
2262 | tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n" , |
2263 | requested_up_corrected, requested_down_corrected); |
2264 | |
2265 | if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) || |
2266 | (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) { |
2267 | tb_tunnel_dbg(tunnel, |
2268 | "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n" , |
2269 | requested_up_corrected, requested_down_corrected, |
2270 | max_up_rounded, max_down_rounded); |
2271 | return -ENOBUFS; |
2272 | } |
2273 | |
2274 | if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) || |
2275 | (*requested_down >= 0 && requested_down_corrected <= allocated_down)) { |
2276 | /* |
2277 | * If bandwidth on a link is < asym_threshold transition |
2278 | * the link to symmetric. |
2279 | */ |
tb_configure_sym(tb, in, out, *requested_up, *requested_down);
2281 | /* |
2282 | * If requested bandwidth is less or equal than what is |
2283 | * currently allocated to that tunnel we simply change |
2284 | * the reservation of the tunnel. Since all the tunnels |
2285 | * going out from the same USB4 port are in the same |
2286 | * group the released bandwidth will be taken into |
2287 | * account for the other tunnels automatically below. |
2288 | */ |
return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
2291 | } |
2292 | |
2293 | /* |
2294 | * More bandwidth is requested. Release all the potential |
2295 | * bandwidth from USB3 first. |
2296 | */ |
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
2298 | if (ret) |
2299 | return ret; |
2300 | |
2301 | /* |
2302 | * Then go over all tunnels that cross the same USB4 ports (they |
2303 | * are also in the same group but we use the same function here |
2304 | * that we use with the normal bandwidth allocation). |
2305 | */ |
ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
true);
2308 | if (ret) |
2309 | goto reclaim; |
2310 | |
2311 | tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n" , |
2312 | available_up, available_down); |
2313 | |
2314 | if ((*requested_up >= 0 && available_up >= requested_up_corrected) || |
2315 | (*requested_down >= 0 && available_down >= requested_down_corrected)) { |
2316 | /* |
2317 | * If bandwidth on a link is >= asym_threshold |
2318 | * transition the link to asymmetric. |
2319 | */ |
ret = tb_configure_asym(tb, in, out, *requested_up,
*requested_down);
if (ret) {
tb_configure_sym(tb, in, out, 0, 0);
return ret;
}

ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
if (ret) {
tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
tb_configure_sym(tb, in, out, 0, 0);
2332 | } |
2333 | } else { |
2334 | ret = -ENOBUFS; |
2335 | } |
2336 | |
2337 | reclaim: |
tb_reclaim_usb3_bandwidth(tb, in, out);
2339 | return ret; |
2340 | } |
2341 | |
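/*
* tb_handle_dp_bandwidth_request() - handle DP bandwidth allocation request
*
* Executes on tb->wq.
*/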
2342 | static void tb_handle_dp_bandwidth_request(struct work_struct *work) |
2343 | { |
2344 | struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); |
2345 | int requested_bw, requested_up, requested_down, ret; |
2346 | struct tb_port *in, *out; |
2347 | struct tb_tunnel *tunnel; |
2348 | struct tb *tb = ev->tb; |
2349 | struct tb_cm *tcm = tb_priv(tb); |
2350 | struct tb_switch *sw; |
2351 | |
pm_runtime_get_sync(&tb->dev);
2353 | |
2354 | mutex_lock(&tb->lock); |
2355 | if (!tcm->hotplug_active) |
2356 | goto unlock; |
2357 | |
sw = tb_switch_find_by_route(tb, ev->route);
2359 | if (!sw) { |
2360 | tb_warn(tb, "bandwidth request from non-existent router %llx\n" , |
2361 | ev->route); |
2362 | goto unlock; |
2363 | } |
2364 | |
2365 | in = &sw->ports[ev->port]; |
if (!tb_port_is_dpin(in)) {
2367 | tb_port_warn(in, "bandwidth request to non-DP IN adapter\n" ); |
2368 | goto put_sw; |
2369 | } |
2370 | |
2371 | tb_port_dbg(in, "handling bandwidth allocation request\n" ); |
2372 | |
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
2374 | tb_port_warn(in, "bandwidth allocation mode not enabled\n" ); |
2375 | goto put_sw; |
2376 | } |
2377 | |
ret = usb4_dp_port_requested_bandwidth(in);
2379 | if (ret < 0) { |
2380 | if (ret == -ENODATA) |
2381 | tb_port_dbg(in, "no bandwidth request active\n" ); |
2382 | else |
2383 | tb_port_warn(in, "failed to read requested bandwidth\n" ); |
2384 | goto put_sw; |
2385 | } |
2386 | requested_bw = ret; |
2387 | |
2388 | tb_port_dbg(in, "requested bandwidth %d Mb/s\n" , requested_bw); |
2389 | |
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
2391 | if (!tunnel) { |
2392 | tb_port_warn(in, "failed to find tunnel\n" ); |
2393 | goto put_sw; |
2394 | } |
2395 | |
2396 | out = tunnel->dst_port; |
2397 | |
if (tb_port_path_direction_downstream(in, out)) {
2399 | requested_up = -1; |
2400 | requested_down = requested_bw; |
2401 | } else { |
2402 | requested_up = requested_bw; |
2403 | requested_down = -1; |
2404 | } |
2405 | |
ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
2407 | if (ret) { |
2408 | if (ret == -ENOBUFS) |
2409 | tb_tunnel_warn(tunnel, |
2410 | "not enough bandwidth available\n" ); |
2411 | else |
2412 | tb_tunnel_warn(tunnel, |
2413 | "failed to change bandwidth allocation\n" ); |
2414 | } else { |
2415 | tb_tunnel_dbg(tunnel, |
2416 | "bandwidth allocation changed to %d/%d Mb/s\n" , |
2417 | requested_up, requested_down); |
2418 | |
2419 | /* Update other clients about the allocation change */ |
2420 | tb_recalc_estimated_bandwidth(tb); |
2421 | } |
2422 | |
2423 | put_sw: |
2424 | tb_switch_put(sw); |
2425 | unlock: |
mutex_unlock(&tb->lock);

pm_runtime_mark_last_busy(&tb->dev);
pm_runtime_put_autosuspend(&tb->dev);

kfree(ev);
2432 | } |
2433 | |
2434 | static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port) |
2435 | { |
2436 | struct tb_hotplug_event *ev; |
2437 | |
ev = kmalloc(sizeof(*ev), GFP_KERNEL);
2439 | if (!ev) |
2440 | return; |
2441 | |
2442 | ev->tb = tb; |
2443 | ev->route = route; |
2444 | ev->port = port; |
2445 | INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request); |
queue_work(tb->wq, &ev->work);
2447 | } |
2448 | |
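/*
* Acknowledge the notification back to the sending router and, for DP
* bandwidth requests, queue the request for processing on tb->wq.
* Other notifications are ignored for now.
*/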
2449 | static void tb_handle_notification(struct tb *tb, u64 route, |
2450 | const struct cfg_error_pkg *error) |
2451 | { |
2452 | |
2453 | switch (error->error) { |
2454 | case TB_CFG_ERROR_PCIE_WAKE: |
2455 | case TB_CFG_ERROR_DP_CON_CHANGE: |
2456 | case TB_CFG_ERROR_DPTX_DISCOVERY: |
if (tb_cfg_ack_notification(tb->ctl, route, error))
2458 | tb_warn(tb, "could not ack notification on %llx\n" , |
2459 | route); |
2460 | break; |
2461 | |
2462 | case TB_CFG_ERROR_DP_BW: |
if (tb_cfg_ack_notification(tb->ctl, route, error))
2464 | tb_warn(tb, "could not ack notification on %llx\n" , |
2465 | route); |
tb_queue_dp_bandwidth_request(tb, route, error->port);
2467 | break; |
2468 | |
2469 | default: |
2470 | /* Ignore for now */ |
2471 | break; |
2472 | } |
2473 | } |
2474 | |
/*
* tb_handle_event() - callback function for the control channel
*
* Queues hotplug events to tb->wq for tb_handle_hotplug() to process.
*/
2480 | static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, |
2481 | const void *buf, size_t size) |
2482 | { |
2483 | const struct cfg_event_pkg *pkg = buf; |
u64 route = tb_cfg_get_route(&pkg->header);
2485 | |
2486 | switch (type) { |
2487 | case TB_CFG_PKG_ERROR: |
tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
2489 | return; |
2490 | case TB_CFG_PKG_EVENT: |
2491 | break; |
2492 | default: |
2493 | tb_warn(tb, "unexpected event %#x, ignoring\n" , type); |
2494 | return; |
2495 | } |
2496 | |
if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
2498 | tb_warn(tb, "could not ack plug event on %llx:%x\n" , route, |
2499 | pkg->port); |
2500 | } |
2501 | |
tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2503 | } |
2504 | |
2505 | static void tb_stop(struct tb *tb) |
2506 | { |
2507 | struct tb_cm *tcm = tb_priv(tb); |
2508 | struct tb_tunnel *tunnel; |
2509 | struct tb_tunnel *n; |
2510 | |
cancel_delayed_work(&tcm->remove_work);
2512 | /* tunnels are only present after everything has been initialized */ |
2513 | list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { |
2514 | /* |
2515 | * DMA tunnels require the driver to be functional so we |
2516 | * tear them down. Other protocol tunnels can be left |
2517 | * intact. |
2518 | */ |
2519 | if (tb_tunnel_is_dma(tunnel)) |
2520 | tb_tunnel_deactivate(tunnel); |
2521 | tb_tunnel_free(tunnel); |
2522 | } |
tb_switch_remove(tb->root_switch);
2524 | tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ |
2525 | } |
2526 | |
2527 | static int tb_scan_finalize_switch(struct device *dev, void *data) |
2528 | { |
2529 | if (tb_is_switch(dev)) { |
2530 | struct tb_switch *sw = tb_to_switch(dev); |
2531 | |
2532 | /* |
2533 | * If we found that the switch was already setup by the |
2534 | * boot firmware, mark it as authorized now before we |
2535 | * send uevent to userspace. |
2536 | */ |
2537 | if (sw->boot) |
2538 | sw->authorized = 1; |
2539 | |
dev_set_uevent_suppress(dev, false);
kobject_uevent(&dev->kobj, KOBJ_ADD);
device_for_each_child(dev, NULL, tb_scan_finalize_switch);
2543 | } |
2544 | |
2545 | return 0; |
2546 | } |
2547 | |
2548 | static int tb_start(struct tb *tb) |
2549 | { |
2550 | struct tb_cm *tcm = tb_priv(tb); |
2551 | int ret; |
2552 | |
tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
if (IS_ERR(tb->root_switch))
return PTR_ERR(tb->root_switch);
2556 | |
2557 | /* |
2558 | * ICM firmware upgrade needs running firmware and in native |
2559 | * mode that is not available so disable firmware upgrade of the |
2560 | * root switch. |
2561 | * |
2562 | * However, USB4 routers support NVM firmware upgrade if they |
2563 | * implement the necessary router operations. |
2564 | */ |
tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
/* All USB4 routers support runtime PM */
tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

ret = tb_switch_configure(tb->root_switch);
if (ret) {
tb_switch_put(tb->root_switch);
2572 | return ret; |
2573 | } |
2574 | |
2575 | /* Announce the switch to the world */ |
ret = tb_switch_add(tb->root_switch);
if (ret) {
tb_switch_put(tb->root_switch);
2579 | return ret; |
2580 | } |
2581 | |
2582 | /* |
2583 | * To support highest CLx state, we set host router's TMU to |
2584 | * Normal mode. |
2585 | */ |
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
2591 | /* Find out tunnels created by the boot firmware */ |
2592 | tb_discover_tunnels(tb); |
2593 | /* Add DP resources from the DP tunnels created by the boot firmware */ |
2594 | tb_discover_dp_resources(tb); |
2595 | /* |
2596 | * If the boot firmware did not create USB 3.x tunnels create them |
2597 | * now for the whole topology. |
2598 | */ |
tb_create_usb3_tunnels(tb->root_switch);
/* Add DP IN resources for the root switch */
tb_add_dp_resources(tb->root_switch);
/* Make the discovered switches available to the userspace */
device_for_each_child(&tb->root_switch->dev, NULL,
tb_scan_finalize_switch);
2605 | |
2606 | /* Allow tb_handle_hotplug to progress events */ |
2607 | tcm->hotplug_active = true; |
2608 | return 0; |
2609 | } |
2610 | |
2611 | static int tb_suspend_noirq(struct tb *tb) |
2612 | { |
2613 | struct tb_cm *tcm = tb_priv(tb); |
2614 | |
2615 | tb_dbg(tb, "suspending...\n" ); |
2616 | tb_disconnect_and_release_dp(tb); |
tb_switch_suspend(tb->root_switch, false);
2618 | tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ |
2619 | tb_dbg(tb, "suspend finished\n" ); |
2620 | |
2621 | return 0; |
2622 | } |
2623 | |
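/*
* Walk the topology after resume, re-enabling CL states and TMU and
* re-configuring the links and XDomain connections of the children
* that are still plugged in.
*/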
2624 | static void tb_restore_children(struct tb_switch *sw) |
2625 | { |
2626 | struct tb_port *port; |
2627 | |
2628 | /* No need to restore if the router is already unplugged */ |
2629 | if (sw->is_unplugged) |
2630 | return; |
2631 | |
2632 | if (tb_enable_clx(sw)) |
2633 | tb_sw_warn(sw, "failed to re-enable CL states\n" ); |
2634 | |
2635 | if (tb_enable_tmu(sw)) |
2636 | tb_sw_warn(sw, "failed to restore TMU configuration\n" ); |
2637 | |
2638 | tb_switch_configuration_valid(sw); |
2639 | |
2640 | tb_switch_for_each_port(sw, port) { |
2641 | if (!tb_port_has_remote(port) && !port->xdomain) |
2642 | continue; |
2643 | |
2644 | if (port->remote) { |
tb_switch_set_link_width(port->remote->sw,
port->remote->sw->link_width);
tb_switch_configure_link(port->remote->sw);

tb_restore_children(port->remote->sw);
} else if (port->xdomain) {
tb_port_configure_xdomain(port, port->xdomain);
2652 | } |
2653 | } |
2654 | } |
2655 | |
2656 | static int tb_resume_noirq(struct tb *tb) |
2657 | { |
2658 | struct tb_cm *tcm = tb_priv(tb); |
2659 | struct tb_tunnel *tunnel, *n; |
2660 | unsigned int usb3_delay = 0; |
2661 | LIST_HEAD(tunnels); |
2662 | |
2663 | tb_dbg(tb, "resuming...\n" ); |
2664 | |
/* remove any PCI devices the firmware might have set up */
tb_switch_reset(tb->root_switch);

tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
2672 | |
2673 | /* |
2674 | * If we get here from suspend to disk the boot firmware or the |
2675 | * restore kernel might have created tunnels of its own. Since |
2676 | * we cannot be sure they are usable for us we find and tear |
2677 | * them down. |
2678 | */ |
tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2680 | list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) { |
2681 | if (tb_tunnel_is_usb3(tunnel)) |
2682 | usb3_delay = 500; |
2683 | tb_tunnel_deactivate(tunnel); |
2684 | tb_tunnel_free(tunnel); |
2685 | } |
2686 | |
2687 | /* Re-create our tunnels now */ |
2688 | list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { |
2689 | /* USB3 requires delay before it can be re-activated */ |
2690 | if (tb_tunnel_is_usb3(tunnel)) { |
msleep(usb3_delay);
2692 | /* Only need to do it once */ |
2693 | usb3_delay = 0; |
2694 | } |
2695 | tb_tunnel_restart(tunnel); |
2696 | } |
if (!list_empty(&tcm->tunnel_list)) {
/*
* The PCIe links need some time to come up after the
* tunnels have been restarted. In practice 100ms is
* enough.
*/
tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
msleep(100);
2704 | } |
2705 | /* Allow tb_handle_hotplug to progress events */ |
2706 | tcm->hotplug_active = true; |
2707 | tb_dbg(tb, "resume finished\n" ); |
2708 | |
2709 | return 0; |
2710 | } |
2711 | |
2712 | static int tb_free_unplugged_xdomains(struct tb_switch *sw) |
2713 | { |
2714 | struct tb_port *port; |
2715 | int ret = 0; |
2716 | |
2717 | tb_switch_for_each_port(sw, port) { |
2718 | if (tb_is_upstream_port(port)) |
2719 | continue; |
2720 | if (port->xdomain && port->xdomain->is_unplugged) { |
2721 | tb_retimer_remove_all(port); |
tb_xdomain_remove(port->xdomain);
2723 | tb_port_unconfigure_xdomain(port); |
2724 | port->xdomain = NULL; |
2725 | ret++; |
2726 | } else if (port->remote) { |
ret += tb_free_unplugged_xdomains(port->remote->sw);
2728 | } |
2729 | } |
2730 | |
2731 | return ret; |
2732 | } |
2733 | |
2734 | static int tb_freeze_noirq(struct tb *tb) |
2735 | { |
2736 | struct tb_cm *tcm = tb_priv(tb); |
2737 | |
2738 | tcm->hotplug_active = false; |
2739 | return 0; |
2740 | } |
2741 | |
2742 | static int tb_thaw_noirq(struct tb *tb) |
2743 | { |
2744 | struct tb_cm *tcm = tb_priv(tb); |
2745 | |
2746 | tcm->hotplug_active = true; |
2747 | return 0; |
2748 | } |
2749 | |
2750 | static void tb_complete(struct tb *tb) |
2751 | { |
2752 | /* |
2753 | * Release any unplugged XDomains and if there is a case where |
2754 | * another domain is swapped in place of unplugged XDomain we |
2755 | * need to run another rescan. |
2756 | */ |
2757 | mutex_lock(&tb->lock); |
if (tb_free_unplugged_xdomains(tb->root_switch))
tb_scan_switch(tb->root_switch);
mutex_unlock(&tb->lock);
2761 | } |
2762 | |
2763 | static int tb_runtime_suspend(struct tb *tb) |
2764 | { |
2765 | struct tb_cm *tcm = tb_priv(tb); |
2766 | |
2767 | mutex_lock(&tb->lock); |
tb_switch_suspend(tb->root_switch, true);
tcm->hotplug_active = false;
mutex_unlock(&tb->lock);
2771 | |
2772 | return 0; |
2773 | } |
2774 | |
2775 | static void tb_remove_work(struct work_struct *work) |
2776 | { |
2777 | struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work); |
2778 | struct tb *tb = tcm_to_tb(tcm); |
2779 | |
2780 | mutex_lock(&tb->lock); |
2781 | if (tb->root_switch) { |
tb_free_unplugged_children(tb->root_switch);
tb_free_unplugged_xdomains(tb->root_switch);
}
mutex_unlock(&tb->lock);
2786 | } |
2787 | |
2788 | static int tb_runtime_resume(struct tb *tb) |
2789 | { |
2790 | struct tb_cm *tcm = tb_priv(tb); |
2791 | struct tb_tunnel *tunnel, *n; |
2792 | |
2793 | mutex_lock(&tb->lock); |
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
tb_tunnel_restart(tunnel);
tcm->hotplug_active = true;
mutex_unlock(&tb->lock);
2801 | |
2802 | /* |
2803 | * Schedule cleanup of any unplugged devices. Run this in a |
2804 | * separate thread to avoid possible deadlock if the device |
2805 | * removal runtime resumes the unplugged device. |
2806 | */ |
queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
2808 | return 0; |
2809 | } |
2810 | |
2811 | static const struct tb_cm_ops tb_cm_ops = { |
2812 | .start = tb_start, |
2813 | .stop = tb_stop, |
2814 | .suspend_noirq = tb_suspend_noirq, |
2815 | .resume_noirq = tb_resume_noirq, |
2816 | .freeze_noirq = tb_freeze_noirq, |
2817 | .thaw_noirq = tb_thaw_noirq, |
2818 | .complete = tb_complete, |
2819 | .runtime_suspend = tb_runtime_suspend, |
2820 | .runtime_resume = tb_runtime_resume, |
2821 | .handle_event = tb_handle_event, |
2822 | .disapprove_switch = tb_disconnect_pci, |
2823 | .approve_switch = tb_tunnel_pci, |
2824 | .approve_xdomain_paths = tb_approve_xdomain_paths, |
2825 | .disconnect_xdomain_paths = tb_disconnect_xdomain_paths, |
2826 | }; |
2827 | |
2828 | /* |
2829 | * During suspend the Thunderbolt controller is reset and all PCIe |
2830 | * tunnels are lost. The NHI driver will try to reestablish all tunnels |
2831 | * during resume. This adds device links between the tunneled PCIe |
2832 | * downstream ports and the NHI so that the device core will make sure |
2833 | * NHI is resumed first before the rest. |
2834 | */ |
2835 | static bool tb_apple_add_links(struct tb_nhi *nhi) |
2836 | { |
2837 | struct pci_dev *upstream, *pdev; |
2838 | bool ret; |
2839 | |
2840 | if (!x86_apple_machine) |
2841 | return false; |
2842 | |
2843 | switch (nhi->pdev->device) { |
2844 | case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: |
2845 | case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: |
2846 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: |
2847 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: |
2848 | break; |
2849 | default: |
2850 | return false; |
2851 | } |
2852 | |
upstream = pci_upstream_bridge(nhi->pdev);
while (upstream) {
if (!pci_is_pcie(upstream))
return false;
if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
break;
upstream = pci_upstream_bridge(upstream);
2860 | } |
2861 | |
2862 | if (!upstream) |
2863 | return false; |
2864 | |
/*
* For each hotplug downstream port, add a device link back to the
* NHI so that PCIe tunnels can be re-established after sleep.
*/
2870 | ret = false; |
2871 | for_each_pci_bridge(pdev, upstream->subordinate) { |
2872 | const struct device_link *link; |
2873 | |
if (!pci_is_pcie(pdev))
continue;
if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
!pdev->is_hotplug_bridge)
continue;

link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2881 | DL_FLAG_AUTOREMOVE_SUPPLIER | |
2882 | DL_FLAG_PM_RUNTIME); |
2883 | if (link) { |
2884 | dev_dbg(&nhi->pdev->dev, "created link from %s\n" , |
2885 | dev_name(&pdev->dev)); |
2886 | ret = true; |
2887 | } else { |
2888 | dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n" , |
2889 | dev_name(&pdev->dev)); |
2890 | } |
2891 | } |
2892 | |
2893 | return ret; |
2894 | } |
2895 | |
2896 | struct tb *tb_probe(struct tb_nhi *nhi) |
2897 | { |
2898 | struct tb_cm *tcm; |
2899 | struct tb *tb; |
2900 | |
tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
2902 | if (!tb) |
2903 | return NULL; |
2904 | |
2905 | if (tb_acpi_may_tunnel_pcie()) |
2906 | tb->security_level = TB_SECURITY_USER; |
2907 | else |
2908 | tb->security_level = TB_SECURITY_NOPCIE; |
2909 | |
2910 | tb->cm_ops = &tb_cm_ops; |
2911 | |
2912 | tcm = tb_priv(tb); |
INIT_LIST_HEAD(&tcm->tunnel_list);
INIT_LIST_HEAD(&tcm->dp_resources);
2915 | INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work); |
2916 | tb_init_bandwidth_groups(tcm); |
2917 | |
2918 | tb_dbg(tb, "using software connection manager\n" ); |
2919 | |
2920 | /* |
2921 | * Device links are needed to make sure we establish tunnels |
2922 | * before the PCIe/USB stack is resumed so complain here if we |
2923 | * found them missing. |
2924 | */ |
2925 | if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi)) |
2926 | tb_warn(tb, "device links to tunneled native ports are missing!\n" ); |
2927 | |
2928 | return tb; |
2929 | } |
2930 | |