// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include "tb.h"

/* Switch NVM support */

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(sw->nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(sw->nvm);
	if (ret)
		return ret;

	buf = sw->nvm->buf_data_start;
	image_size = sw->nvm->buf_data_size;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (ret)
		return ret;

	sw->nvm->flushed = true;
	return 0;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	}
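	/* Authenticating without first writing a new image is USB4-only */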
	if (auth_only)
		return -EOPNOTSUPP;

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 on success and negative errno in case
 * of failure.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

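	/*
	 * Take the domain lock with trylock and restart the syscall on
	 * contention instead of blocking here while the lock is held
	 * elsewhere for a potentially long operation.
	 */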
	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm)) {
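		/*
		 * -EOPNOTSUPP just means the router has no NVM support,
		 * so treat it as success; the upgrade simply stays
		 * disabled.
		 */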
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		ret = tb_nvm_add_active(nvm, nvm_read);
		if (ret)
			goto err_nvm;
		tb_sw_dbg(sw, "NVM version %x.%x\n", nvm->major, nvm->minor);
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
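	/*
	 * The high 16 bits of the type register select the protocol;
	 * for protocol 0 the low byte gives the adapter subtype.
	 */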
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		switch (state) {
		case TB_PORT_DISABLED:
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;

		case TB_PORT_UNPLUGGED:
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				break;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;

		case TB_PORT_UP:
		case TB_PORT_TX_CL0S:
		case TB_PORT_RX_CL0S:
		case TB_PORT_CL1:
		case TB_PORT_CL2:
			tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
			return 1;

		default:
			if (state < 0)
				return state;

			/*
			 * After plug-in the state is TB_PORT_CONNECTING.
			 * Give it some time.
			 */
			tb_port_dbg(port,
				    "is connected, link is not up (state: %d), retrying...\n",
				    state);
			msleep(100);
		}
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
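	/* Never remove more NFC credits than are currently allocated */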
	if (credits < 0)
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

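	/* LANE_ADP_CS_1_LD is the lane disable bit: clear it to enable */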
	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
	return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used to enable lane 0 and lane 1 adapters.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used to disable lane 0 and lane 1 adapters.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * For USB4 ports the buffers allocated for the control
		 * path can be read from the path config space. For
		 * legacy devices we use a hard-coded value.
		 */
		if (port->cap_usb4) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7
	 * are reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
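	/*
	 * The route string encodes one 8-bit hop per depth level, so
	 * masking with (1 << depth * 8) - 1 compares only the part of
	 * the route shared with @parent.
	 */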
	u64 mask = (1ULL << parent->config.depth * 8) - 1;

	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
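 * Typical usage walks the whole path, for example (process() being a
 * placeholder for the caller's own handling):
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(start, end, p)))
 *		process(p);
 *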
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;

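	/* Speed encodings: Gen 4 = 40 Gb/s, Gen 3 = 20 Gb/s, anything else 10 Gb/s */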
	switch (speed) {
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
		return 40;
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
		return 20;
	default:
		return 10;
	}
}

/**
 * tb_port_get_link_generation() - Returns link generation
 * @port: Lane adapter
 *
 * Returns link generation as number or negative errno in case of
 * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
 * links so for those always returns 2.
 */
int tb_port_get_link_generation(struct tb_port *port)
{
	int ret;

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	switch (ret) {
	case 40:
		return 4;
	case 20:
		return 3;
	default:
		return 2;
	}
}

static const char *width_name(enum tb_link_width width)
{
	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		return "symmetric, single lane";
	case TB_LINK_WIDTH_DUAL:
		return "symmetric, dual lanes";
	case TB_LINK_WIDTH_ASYM_TX:
		return "asymmetric, 3 transmitters, 1 receiver";
	case TB_LINK_WIDTH_ASYM_RX:
		return "asymmetric, 3 receivers, 1 transmitter";
	default:
		return "unknown";
	}
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns the link width as encoded in &enum tb_link_width or negative
 * errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Matches the values in enum tb_link_width */
	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

/**
 * tb_port_width_supported() - Is the given link width supported
 * @port: Port to check
 * @width: Widths to check (bitmask)
 *
 * Can be called for any lane adapter. Checks if given @width is
 * supported by the hardware and returns %true if it is.
 */
bool tb_port_width_supported(struct tb_port *port, unsigned int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
		if (tb_port_get_link_generation(port) < 4 ||
		    !usb4_port_asym_supported(port))
			return false;
	}

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	/*
	 * The field encoding is the same as &enum tb_link_width (which is
	 * passed to @width).
	 */
	widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
	return widths & width;
}

/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		/* Gen 4 link cannot be single */
		if (tb_port_get_link_generation(port) >= 4)
			return -EOPNOTSUPP;
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_DUAL:
		if (tb_port_get_link_generation(port) >= 4)
			return usb4_port_asym_set_link_width(port, width);
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_ASYM_TX:
	case TB_LINK_WIDTH_ASYM_RX:
		return usb4_port_asym_set_link_width(port, width);

	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * lane bonding.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (bonding)
		val |= LANE_ADP_CS_1_LB;
	else
		val &= ~LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in expected state.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	enum tb_link_width width;
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by
	 * for example the boot firmware.
	 */
	width = tb_port_get_link_width(port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane0;
	}

	width = tb_port_get_link_width(port->dual_link_port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port->dual_link_port,
					     TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane0;
	}

	/*
	 * Only set bonding if the link was not already bonded. This
	 * avoids making the lane adapter re-enter the bonding state.
	 */
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_lane_bonding(port, true);
		if (ret)
			goto err_lane1;
	}

	/*
	 * When lane 0 bonding is set it will affect lane 1 too so
	 * update both.
	 */
	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;

err_lane1:
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
err_lane0:
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);

	return ret;
}

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	tb_port_set_lane_bonding(port, false);
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
	port->dual_link_port->bonded = false;
	port->bonded = false;
}

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (bitmask)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the width was not reached
 * within the given timeout, %0 if it did. Can be passed a mask of
 * expected widths and succeeds if any of the widths is reached.
 */
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	/* Gen 4 link does not support single lane */
	if ((width & TB_LINK_WIDTH_SINGLE) &&
	    tb_port_get_link_generation(port) >= 4)
		return -EOPNOTSUPP;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0) {
			/*
			 * Sometimes we get port locked error when
			 * polling the lanes so we can ignore it and
			 * retry.
			 */
			if (ret != -EACCES)
				return ret;
		} else if (ret & width) {
			return 0;
		}

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;
	return tb_port_do_update_credits(port->dual_link_port);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
		 */
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	}

	return has_remote || port->xdomain;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HPD bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HPD);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HPD set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HPDC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	struct tb_cfg_result res;

	if (sw->generation > 1)
		return 0;

	tb_sw_dbg(sw, "resetting switch\n");

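	/*
	 * Config space dwords 2 and 3 hold the route string (and the
	 * enabled bit), so rewrite those before sending the reset.
	 */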
	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait till the specified bits in specified offset reach specified value.
 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
 * within the given timeout or a negative errno in case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

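	/*
	 * Bits 2..6 (0x7c) mask individual plug events: clear them to
	 * activate event delivery, set them all to deactivate it.
	 */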
1633 | if (active) { |
1634 | data = data & 0xFFFFFF83; |
1635 | switch (sw->config.device_id) { |
1636 | case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: |
1637 | case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: |
1638 | case PCI_DEVICE_ID_INTEL_PORT_RIDGE: |
1639 | break; |
1640 | default: |
1641 | /* |
1642 | * Skip Alpine Ridge, it needs to have vendor |
1643 | * specific USB hotplug event enabled for the |
1644 | * internal xHCI to work. |
1645 | */ |
1646 | if (!tb_switch_is_alpine_ridge(sw)) |
1647 | data |= TB_PLUG_EVENTS_USB_DISABLE; |
1648 | } |
1649 | } else { |
1650 | data = data | 0x7c; |
1651 | } |
1652 | return tb_sw_write(sw, buffer: &data, space: TB_CFG_SWITCH, |
1653 | offset: sw->cap_plug_events + 1, length: 1); |
1654 | } |
1655 | |
1656 | static ssize_t authorized_show(struct device *dev, |
1657 | struct device_attribute *attr, |
1658 | char *buf) |
1659 | { |
1660 | struct tb_switch *sw = tb_to_switch(dev); |
1661 | |
1662 | return sysfs_emit(buf, fmt: "%u\n" , sw->authorized); |
1663 | } |
1664 | |
1665 | static int disapprove_switch(struct device *dev, void *not_used) |
1666 | { |
1667 | char *envp[] = { "AUTHORIZED=0" , NULL }; |
1668 | struct tb_switch *sw; |
1669 | |
1670 | sw = tb_to_switch(dev); |
1671 | if (sw && sw->authorized) { |
1672 | int ret; |
1673 | |
1674 | /* First children */ |
1675 | ret = device_for_each_child_reverse(dev: &sw->dev, NULL, fn: disapprove_switch); |
1676 | if (ret) |
1677 | return ret; |
1678 | |
1679 | ret = tb_domain_disapprove_switch(tb: sw->tb, sw); |
1680 | if (ret) |
1681 | return ret; |
1682 | |
1683 | sw->authorized = 0; |
1684 | kobject_uevent_env(kobj: &sw->dev.kobj, action: KOBJ_CHANGE, envp); |
1685 | } |
1686 | |
1687 | return 0; |
1688 | } |
1689 | |
1690 | static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) |
1691 | { |
1692 | char envp_string[13]; |
1693 | int ret = -EINVAL; |
1694 | char *envp[] = { envp_string, NULL }; |
1695 | |
1696 | if (!mutex_trylock(lock: &sw->tb->lock)) |
1697 | return restart_syscall(); |
1698 | |
1699 | if (!!sw->authorized == !!val) |
1700 | goto unlock; |
1701 | |
1702 | switch (val) { |
1703 | /* Disapprove switch */ |
1704 | case 0: |
1705 | if (tb_route(sw)) { |
1706 | ret = disapprove_switch(dev: &sw->dev, NULL); |
1707 | goto unlock; |
1708 | } |
1709 | break; |
1710 | |
1711 | /* Approve switch */ |
1712 | case 1: |
1713 | if (sw->key) |
1714 | ret = tb_domain_approve_switch_key(tb: sw->tb, sw); |
1715 | else |
1716 | ret = tb_domain_approve_switch(tb: sw->tb, sw); |
1717 | break; |
1718 | |
1719 | /* Challenge switch */ |
1720 | case 2: |
1721 | if (sw->key) |
1722 | ret = tb_domain_challenge_switch_key(tb: sw->tb, sw); |
1723 | break; |
1724 | |
1725 | default: |
1726 | break; |
1727 | } |
1728 | |
1729 | if (!ret) { |
1730 | sw->authorized = val; |
1731 | /* |
1732 | * Notify status change to the userspace, informing the new |
1733 | * value of /sys/bus/thunderbolt/devices/.../authorized. |
1734 | */ |
1735 | sprintf(buf: envp_string, fmt: "AUTHORIZED=%u" , sw->authorized); |
1736 | kobject_uevent_env(kobj: &sw->dev.kobj, action: KOBJ_CHANGE, envp); |
1737 | } |
1738 | |
1739 | unlock: |
1740 | mutex_unlock(lock: &sw->tb->lock); |
1741 | return ret; |
1742 | } |
1743 | |
1744 | static ssize_t authorized_store(struct device *dev, |
1745 | struct device_attribute *attr, |
1746 | const char *buf, size_t count) |
1747 | { |
1748 | struct tb_switch *sw = tb_to_switch(dev); |
1749 | unsigned int val; |
1750 | ssize_t ret; |
1751 | |
1752 | ret = kstrtouint(s: buf, base: 0, res: &val); |
1753 | if (ret) |
1754 | return ret; |
1755 | if (val > 2) |
1756 | return -EINVAL; |
1757 | |
1758 | pm_runtime_get_sync(dev: &sw->dev); |
1759 | ret = tb_switch_set_authorized(sw, val); |
1760 | pm_runtime_mark_last_busy(dev: &sw->dev); |
1761 | pm_runtime_put_autosuspend(dev: &sw->dev); |
1762 | |
1763 | return ret ? ret : count; |
1764 | } |
1765 | static DEVICE_ATTR_RW(authorized); |
1766 | |
1767 | static ssize_t boot_show(struct device *dev, struct device_attribute *attr, |
1768 | char *buf) |
1769 | { |
1770 | struct tb_switch *sw = tb_to_switch(dev); |
1771 | |
1772 | return sysfs_emit(buf, fmt: "%u\n" , sw->boot); |
1773 | } |
1774 | static DEVICE_ATTR_RO(boot); |
1775 | |
1776 | static ssize_t device_show(struct device *dev, struct device_attribute *attr, |
1777 | char *buf) |
1778 | { |
1779 | struct tb_switch *sw = tb_to_switch(dev); |
1780 | |
1781 | return sysfs_emit(buf, fmt: "%#x\n" , sw->device); |
1782 | } |
1783 | static DEVICE_ATTR_RO(device); |
1784 | |
1785 | static ssize_t |
1786 | device_name_show(struct device *dev, struct device_attribute *attr, char *buf) |
1787 | { |
1788 | struct tb_switch *sw = tb_to_switch(dev); |
1789 | |
	return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
1791 | } |
1792 | static DEVICE_ATTR_RO(device_name); |
1793 | |
1794 | static ssize_t |
1795 | generation_show(struct device *dev, struct device_attribute *attr, char *buf) |
1796 | { |
1797 | struct tb_switch *sw = tb_to_switch(dev); |
1798 | |
	return sysfs_emit(buf, "%u\n", sw->generation);
1800 | } |
1801 | static DEVICE_ATTR_RO(generation); |
1802 | |
1803 | static ssize_t key_show(struct device *dev, struct device_attribute *attr, |
1804 | char *buf) |
1805 | { |
1806 | struct tb_switch *sw = tb_to_switch(dev); |
1807 | ssize_t ret; |
1808 | |
	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sysfs_emit(buf, "\n");

	mutex_unlock(&sw->tb->lock);
1818 | return ret; |
1819 | } |
1820 | |
1821 | static ssize_t key_store(struct device *dev, struct device_attribute *attr, |
1822 | const char *buf, size_t count) |
1823 | { |
1824 | struct tb_switch *sw = tb_to_switch(dev); |
1825 | u8 key[TB_SWITCH_KEY_SIZE]; |
1826 | ssize_t ret = count; |
1827 | bool clear = false; |
1828 | |
	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
1851 | return ret; |
1852 | } |
1853 | static DEVICE_ATTR(key, 0600, key_show, key_store); |
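
/*
 * Illustrative sketch (not part of the driver): with TB_SECURITY_SECURE
 * a user agent writes the key as a hex string before requesting a
 * challenge through "authorized". The device name and the key value are
 * assumptions; the string must decode to TB_SWITCH_KEY_SIZE bytes.
 *
 *	const char *hex_key =
 *		"0123456789abcdef0123456789abcdef"
 *		"0123456789abcdef0123456789abcdef";
 *	int fd = open("/sys/bus/thunderbolt/devices/0-1/key", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, hex_key, strlen(hex_key));
 *		close(fd);
 *	}
 */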
1854 | |
1855 | static ssize_t speed_show(struct device *dev, struct device_attribute *attr, |
1856 | char *buf) |
1857 | { |
1858 | struct tb_switch *sw = tb_to_switch(dev); |
1859 | |
	return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
1861 | } |
1862 | |
1863 | /* |
1864 | * Currently all lanes must run at the same speed but we expose here |
1865 | * both directions to allow possible asymmetric links in the future. |
1866 | */ |
1867 | static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); |
1868 | static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); |
1869 | |
1870 | static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr, |
1871 | char *buf) |
1872 | { |
1873 | struct tb_switch *sw = tb_to_switch(dev); |
1874 | unsigned int width; |
1875 | |
1876 | switch (sw->link_width) { |
1877 | case TB_LINK_WIDTH_SINGLE: |
1878 | case TB_LINK_WIDTH_ASYM_TX: |
1879 | width = 1; |
1880 | break; |
1881 | case TB_LINK_WIDTH_DUAL: |
1882 | width = 2; |
1883 | break; |
1884 | case TB_LINK_WIDTH_ASYM_RX: |
1885 | width = 3; |
1886 | break; |
1887 | default: |
1888 | WARN_ON_ONCE(1); |
1889 | return -EINVAL; |
1890 | } |
1891 | |
	return sysfs_emit(buf, "%u\n", width);
1893 | } |
1894 | static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL); |
1895 | |
1896 | static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr, |
1897 | char *buf) |
1898 | { |
1899 | struct tb_switch *sw = tb_to_switch(dev); |
1900 | unsigned int width; |
1901 | |
1902 | switch (sw->link_width) { |
1903 | case TB_LINK_WIDTH_SINGLE: |
1904 | case TB_LINK_WIDTH_ASYM_RX: |
1905 | width = 1; |
1906 | break; |
1907 | case TB_LINK_WIDTH_DUAL: |
1908 | width = 2; |
1909 | break; |
1910 | case TB_LINK_WIDTH_ASYM_TX: |
1911 | width = 3; |
1912 | break; |
1913 | default: |
1914 | WARN_ON_ONCE(1); |
1915 | return -EINVAL; |
1916 | } |
1917 | |
	return sysfs_emit(buf, "%u\n", width);
1919 | } |
1920 | static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL); |
1921 | |
1922 | static ssize_t nvm_authenticate_show(struct device *dev, |
1923 | struct device_attribute *attr, char *buf) |
1924 | { |
1925 | struct tb_switch *sw = tb_to_switch(dev); |
1926 | u32 status; |
1927 | |
	nvm_get_auth_status(sw, &status);
	return sysfs_emit(buf, "%#x\n", status);
1930 | } |
1931 | |
1932 | static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf, |
1933 | bool disconnect) |
1934 | { |
1935 | struct tb_switch *sw = tb_to_switch(dev); |
1936 | int val, ret; |
1937 | |
	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
1941 | ret = restart_syscall(); |
1942 | goto exit_rpm; |
1943 | } |
1944 | |
1945 | if (sw->no_nvm_upgrade) { |
1946 | ret = -EOPNOTSUPP; |
1947 | goto exit_unlock; |
1948 | } |
1949 | |
1950 | /* If NVMem devices are not yet added */ |
1951 | if (!sw->nvm) { |
1952 | ret = -EAGAIN; |
1953 | goto exit_unlock; |
1954 | } |
1955 | |
	ret = kstrtoint(buf, 10, &val);
1957 | if (ret) |
1958 | goto exit_unlock; |
1959 | |
1960 | /* Always clear the authentication status */ |
1961 | nvm_clear_auth_status(sw); |
1962 | |
1963 | if (val > 0) { |
1964 | if (val == AUTHENTICATE_ONLY) { |
1965 | if (disconnect) |
1966 | ret = -EINVAL; |
1967 | else |
				ret = nvm_authenticate(sw, true);
1969 | } else { |
1970 | if (!sw->nvm->flushed) { |
1971 | if (!sw->nvm->buf) { |
1972 | ret = -EINVAL; |
1973 | goto exit_unlock; |
1974 | } |
1975 | |
1976 | ret = nvm_validate_and_write(sw); |
1977 | if (ret || val == WRITE_ONLY) |
1978 | goto exit_unlock; |
1979 | } |
1980 | if (val == WRITE_AND_AUTHENTICATE) { |
1981 | if (disconnect) |
1982 | ret = tb_lc_force_power(sw); |
1983 | else |
				ret = nvm_authenticate(sw, false);
1985 | } |
1986 | } |
1987 | } |
1988 | |
1989 | exit_unlock: |
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
1994 | |
1995 | return ret; |
1996 | } |
1997 | |
1998 | static ssize_t nvm_authenticate_store(struct device *dev, |
1999 | struct device_attribute *attr, const char *buf, size_t count) |
2000 | { |
	int ret = nvm_authenticate_sysfs(dev, buf, false);
2002 | if (ret) |
2003 | return ret; |
2004 | return count; |
2005 | } |
2006 | static DEVICE_ATTR_RW(nvm_authenticate); |
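
/*
 * Illustrative sketch (not part of the driver): a typical NVM upgrade
 * first writes the image to the non-active NVMem device and then starts
 * authentication by writing 1 here. Device and NVMem paths below are
 * assumptions.
 *
 *	// 1. copy image to .../0-1/nvm_non_active0/nvmem
 *	// 2. start flash + authentication:
 *	int fd = open("/sys/bus/thunderbolt/devices/0-1/nvm_authenticate",
 *		      O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "1", 1);	// WRITE_AND_AUTHENTICATE above
 *		close(fd);
 *	}
 */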
2007 | |
2008 | static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev, |
2009 | struct device_attribute *attr, char *buf) |
2010 | { |
2011 | return nvm_authenticate_show(dev, attr, buf); |
2012 | } |
2013 | |
2014 | static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev, |
2015 | struct device_attribute *attr, const char *buf, size_t count) |
2016 | { |
2017 | int ret; |
2018 | |
	ret = nvm_authenticate_sysfs(dev, buf, true);
2020 | return ret ? ret : count; |
2021 | } |
2022 | static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect); |
2023 | |
2024 | static ssize_t nvm_version_show(struct device *dev, |
2025 | struct device_attribute *attr, char *buf) |
2026 | { |
2027 | struct tb_switch *sw = tb_to_switch(dev); |
2028 | int ret; |
2029 | |
	if (!mutex_trylock(&sw->tb->lock))
2031 | return restart_syscall(); |
2032 | |
2033 | if (sw->safe_mode) |
2034 | ret = -ENODATA; |
2035 | else if (!sw->nvm) |
2036 | ret = -EAGAIN; |
2037 | else |
		ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);
2041 | |
2042 | return ret; |
2043 | } |
2044 | static DEVICE_ATTR_RO(nvm_version); |
2045 | |
2046 | static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, |
2047 | char *buf) |
2048 | { |
2049 | struct tb_switch *sw = tb_to_switch(dev); |
2050 | |
	return sysfs_emit(buf, "%#x\n", sw->vendor);
2052 | } |
2053 | static DEVICE_ATTR_RO(vendor); |
2054 | |
2055 | static ssize_t |
2056 | vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) |
2057 | { |
2058 | struct tb_switch *sw = tb_to_switch(dev); |
2059 | |
	return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
2061 | } |
2062 | static DEVICE_ATTR_RO(vendor_name); |
2063 | |
2064 | static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, |
2065 | char *buf) |
2066 | { |
2067 | struct tb_switch *sw = tb_to_switch(dev); |
2068 | |
	return sysfs_emit(buf, "%pUb\n", sw->uuid);
2070 | } |
2071 | static DEVICE_ATTR_RO(unique_id); |
2072 | |
2073 | static struct attribute *switch_attrs[] = { |
2074 | &dev_attr_authorized.attr, |
2075 | &dev_attr_boot.attr, |
2076 | &dev_attr_device.attr, |
2077 | &dev_attr_device_name.attr, |
2078 | &dev_attr_generation.attr, |
2079 | &dev_attr_key.attr, |
2080 | &dev_attr_nvm_authenticate.attr, |
2081 | &dev_attr_nvm_authenticate_on_disconnect.attr, |
2082 | &dev_attr_nvm_version.attr, |
2083 | &dev_attr_rx_speed.attr, |
2084 | &dev_attr_rx_lanes.attr, |
2085 | &dev_attr_tx_speed.attr, |
2086 | &dev_attr_tx_lanes.attr, |
2087 | &dev_attr_vendor.attr, |
2088 | &dev_attr_vendor_name.attr, |
2089 | &dev_attr_unique_id.attr, |
2090 | NULL, |
2091 | }; |
2092 | |
2093 | static umode_t switch_attr_is_visible(struct kobject *kobj, |
2094 | struct attribute *attr, int n) |
2095 | { |
2096 | struct device *dev = kobj_to_dev(kobj); |
2097 | struct tb_switch *sw = tb_to_switch(dev); |
2098 | |
2099 | if (attr == &dev_attr_authorized.attr) { |
2100 | if (sw->tb->security_level == TB_SECURITY_NOPCIE || |
2101 | sw->tb->security_level == TB_SECURITY_DPONLY) |
2102 | return 0; |
2103 | } else if (attr == &dev_attr_device.attr) { |
2104 | if (!sw->device) |
2105 | return 0; |
2106 | } else if (attr == &dev_attr_device_name.attr) { |
2107 | if (!sw->device_name) |
2108 | return 0; |
2109 | } else if (attr == &dev_attr_vendor.attr) { |
2110 | if (!sw->vendor) |
2111 | return 0; |
2112 | } else if (attr == &dev_attr_vendor_name.attr) { |
2113 | if (!sw->vendor_name) |
2114 | return 0; |
2115 | } else if (attr == &dev_attr_key.attr) { |
2116 | if (tb_route(sw) && |
2117 | sw->tb->security_level == TB_SECURITY_SECURE && |
2118 | sw->security_level == TB_SECURITY_SECURE) |
2119 | return attr->mode; |
2120 | return 0; |
2121 | } else if (attr == &dev_attr_rx_speed.attr || |
2122 | attr == &dev_attr_rx_lanes.attr || |
2123 | attr == &dev_attr_tx_speed.attr || |
2124 | attr == &dev_attr_tx_lanes.attr) { |
2125 | if (tb_route(sw)) |
2126 | return attr->mode; |
2127 | return 0; |
2128 | } else if (attr == &dev_attr_nvm_authenticate.attr) { |
2129 | if (nvm_upgradeable(sw)) |
2130 | return attr->mode; |
2131 | return 0; |
2132 | } else if (attr == &dev_attr_nvm_version.attr) { |
2133 | if (nvm_readable(sw)) |
2134 | return attr->mode; |
2135 | return 0; |
2136 | } else if (attr == &dev_attr_boot.attr) { |
2137 | if (tb_route(sw)) |
2138 | return attr->mode; |
2139 | return 0; |
2140 | } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) { |
2141 | if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) |
2142 | return attr->mode; |
2143 | return 0; |
2144 | } |
2145 | |
2146 | return sw->safe_mode ? 0 : attr->mode; |
2147 | } |
2148 | |
2149 | static const struct attribute_group switch_group = { |
2150 | .is_visible = switch_attr_is_visible, |
2151 | .attrs = switch_attrs, |
2152 | }; |
2153 | |
2154 | static const struct attribute_group *switch_groups[] = { |
2155 | &switch_group, |
2156 | NULL, |
2157 | }; |
2158 | |
2159 | static void tb_switch_release(struct device *dev) |
2160 | { |
2161 | struct tb_switch *sw = tb_to_switch(dev); |
2162 | struct tb_port *port; |
2163 | |
	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		ida_destroy(&port->in_hopids);
		ida_destroy(&port->out_hopids);
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
2178 | } |
2179 | |
2180 | static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env) |
2181 | { |
2182 | const struct tb_switch *sw = tb_to_switch(dev); |
2183 | const char *type; |
2184 | |
2185 | if (tb_switch_is_usb4(sw)) { |
		if (add_uevent_var(env, "USB4_VERSION=%u.0",
2187 | usb4_switch_version(sw))) |
2188 | return -ENOMEM; |
2189 | } |
2190 | |
2191 | if (!tb_route(sw)) { |
2192 | type = "host" ; |
2193 | } else { |
2194 | const struct tb_port *port; |
2195 | bool hub = false; |
2196 | |
2197 | /* Device is hub if it has any downstream ports */ |
2198 | tb_switch_for_each_port(sw, port) { |
2199 | if (!port->disabled && !tb_is_upstream_port(port) && |
2200 | tb_port_is_null(port)) { |
2201 | hub = true; |
2202 | break; |
2203 | } |
2204 | } |
2205 | |
		type = hub ? "hub" : "device";
2207 | } |
2208 | |
	if (add_uevent_var(env, "USB4_TYPE=%s", type))
2210 | return -ENOMEM; |
2211 | return 0; |
2212 | } |
2213 | |
2214 | /* |
2215 | * Currently only need to provide the callbacks. Everything else is handled |
2216 | * in the connection manager. |
2217 | */ |
2218 | static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) |
2219 | { |
2220 | struct tb_switch *sw = tb_to_switch(dev); |
2221 | const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; |
2222 | |
2223 | if (cm_ops->runtime_suspend_switch) |
2224 | return cm_ops->runtime_suspend_switch(sw); |
2225 | |
2226 | return 0; |
2227 | } |
2228 | |
2229 | static int __maybe_unused tb_switch_runtime_resume(struct device *dev) |
2230 | { |
2231 | struct tb_switch *sw = tb_to_switch(dev); |
2232 | const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; |
2233 | |
2234 | if (cm_ops->runtime_resume_switch) |
2235 | return cm_ops->runtime_resume_switch(sw); |
2236 | return 0; |
2237 | } |
2238 | |
2239 | static const struct dev_pm_ops tb_switch_pm_ops = { |
2240 | SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume, |
2241 | NULL) |
2242 | }; |
2243 | |
2244 | struct device_type tb_switch_type = { |
2245 | .name = "thunderbolt_device" , |
2246 | .release = tb_switch_release, |
2247 | .uevent = tb_switch_uevent, |
2248 | .pm = &tb_switch_pm_ops, |
2249 | }; |
2250 | |
2251 | static int tb_switch_get_generation(struct tb_switch *sw) |
2252 | { |
2253 | if (tb_switch_is_usb4(sw)) |
2254 | return 4; |
2255 | |
2256 | if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { |
2257 | switch (sw->config.device_id) { |
2258 | case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: |
2259 | case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: |
2260 | case PCI_DEVICE_ID_INTEL_LIGHT_PEAK: |
2261 | case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: |
2262 | case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: |
2263 | case PCI_DEVICE_ID_INTEL_PORT_RIDGE: |
2264 | case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE: |
2265 | case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE: |
2266 | return 1; |
2267 | |
2268 | case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE: |
2269 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: |
2270 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: |
2271 | return 2; |
2272 | |
2273 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: |
2274 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: |
2275 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: |
2276 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: |
2277 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: |
2278 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: |
2279 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: |
2280 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: |
2281 | case PCI_DEVICE_ID_INTEL_ICL_NHI0: |
2282 | case PCI_DEVICE_ID_INTEL_ICL_NHI1: |
2283 | return 3; |
2284 | } |
2285 | } |
2286 | |
2287 | /* |
2288 | * For unknown switches assume generation to be 1 to be on the |
2289 | * safe side. |
2290 | */ |
2291 | tb_sw_warn(sw, "unsupported switch device id %#x\n" , |
2292 | sw->config.device_id); |
2293 | return 1; |
2294 | } |
2295 | |
2296 | static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) |
2297 | { |
2298 | int max_depth; |
2299 | |
	if (tb_switch_is_usb4(sw) ||
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2302 | max_depth = USB4_SWITCH_MAX_DEPTH; |
2303 | else |
2304 | max_depth = TB_SWITCH_MAX_DEPTH; |
2305 | |
2306 | return depth > max_depth; |
2307 | } |
2308 | |
2309 | /** |
2310 | * tb_switch_alloc() - allocate a switch |
2311 | * @tb: Pointer to the owning domain |
2312 | * @parent: Parent device for this switch |
2313 | * @route: Route string for this switch |
2314 | * |
2315 | * Allocates and initializes a switch. Will not upload configuration to |
2316 | * the switch. For that you need to call tb_switch_configure() |
2317 | * separately. The returned switch should be released by calling |
2318 | * tb_switch_put(). |
2319 | * |
2320 | * Return: Pointer to the allocated switch or ERR_PTR() in case of |
2321 | * failure. |
2322 | */ |
2323 | struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, |
2324 | u64 route) |
2325 | { |
2326 | struct tb_switch *sw; |
2327 | int upstream_port; |
2328 | int i, ret, depth; |
2329 | |
2330 | /* Unlock the downstream port so we can access the switch below */ |
2331 | if (route) { |
		struct tb_switch *parent_sw = tb_to_switch(parent);
2333 | struct tb_port *down; |
2334 | |
		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
2337 | } |
2338 | |
2339 | depth = tb_route_length(route); |
2340 | |
	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2351 | if (ret) |
2352 | goto err_free_sw_ports; |
2353 | |
2354 | sw->generation = tb_switch_get_generation(sw); |
2355 | |
2356 | tb_dbg(tb, "current switch config:\n" ); |
2357 | tb_dump_switch(tb, sw); |
2358 | |
2359 | /* configure switch */ |
2360 | sw->config.upstream_port_number = upstream_port; |
2361 | sw->config.depth = depth; |
2362 | sw->config.route_hi = upper_32_bits(route); |
2363 | sw->config.route_lo = lower_32_bits(route); |
2364 | sw->config.enabled = 0; |
2365 | |
2366 | /* Make sure we do not exceed maximum topology limit */ |
2367 | if (tb_switch_exceeds_max_depth(sw, depth)) { |
2368 | ret = -EADDRNOTAVAIL; |
2369 | goto err_free_sw_ports; |
2370 | } |
2371 | |
2372 | /* initialize ports */ |
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
2375 | if (!sw->ports) { |
2376 | ret = -ENOMEM; |
2377 | goto err_free_sw_ports; |
2378 | } |
2379 | |
2380 | for (i = 0; i <= sw->config.max_port_number; i++) { |
2381 | /* minimum setup for tb_find_cap and tb_drom_read to work */ |
2382 | sw->ports[i].sw = sw; |
2383 | sw->ports[i].port = i; |
2384 | |
2385 | /* Control port does not need HopID allocation */ |
2386 | if (i) { |
			ida_init(&sw->ports[i].in_hopids);
			ida_init(&sw->ports[i].out_hopids);
2389 | } |
2390 | } |
2391 | |
	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2393 | if (ret > 0) |
2394 | sw->cap_plug_events = ret; |
2395 | |
	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
2397 | if (ret > 0) |
2398 | sw->cap_vsec_tmu = ret; |
2399 | |
	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2401 | if (ret > 0) |
2402 | sw->cap_lc = ret; |
2403 | |
	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
2405 | if (ret > 0) |
2406 | sw->cap_lp = ret; |
2407 | |
2408 | /* Root switch is always authorized */ |
2409 | if (!route) |
2410 | sw->authorized = true; |
2411 | |
	device_initialize(&sw->dev);
2413 | sw->dev.parent = parent; |
2414 | sw->dev.bus = &tb_bus_type; |
2415 | sw->dev.type = &tb_switch_type; |
2416 | sw->dev.groups = switch_groups; |
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2418 | |
2419 | return sw; |
2420 | |
2421 | err_free_sw_ports: |
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
2426 | } |
2427 | |
2428 | /** |
2429 | * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode |
2430 | * @tb: Pointer to the owning domain |
2431 | * @parent: Parent device for this switch |
2432 | * @route: Route string for this switch |
2433 | * |
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except the DMA configuration port before it is
 * flashed with a valid NVM firmware.
2437 | * |
2438 | * The returned switch must be released by calling tb_switch_put(). |
2439 | * |
2440 | * Return: Pointer to the allocated switch or ERR_PTR() in case of failure |
2441 | */ |
2442 | struct tb_switch * |
2443 | tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) |
2444 | { |
2445 | struct tb_switch *sw; |
2446 | |
	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);
2450 | |
2451 | sw->tb = tb; |
2452 | sw->config.depth = tb_route_length(route); |
2453 | sw->config.route_hi = upper_32_bits(route); |
2454 | sw->config.route_lo = lower_32_bits(route); |
2455 | sw->safe_mode = true; |
2456 | |
	device_initialize(&sw->dev);
2458 | sw->dev.parent = parent; |
2459 | sw->dev.bus = &tb_bus_type; |
2460 | sw->dev.type = &tb_switch_type; |
2461 | sw->dev.groups = switch_groups; |
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2463 | |
2464 | return sw; |
2465 | } |
2466 | |
2467 | /** |
2468 | * tb_switch_configure() - Uploads configuration to the switch |
2469 | * @sw: Switch to configure |
2470 | * |
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use. Can be called again on the same switch
 * after resume from low power states to re-initialize it.
2475 | * |
2476 | * Return: %0 in case of success and negative errno in case of failure |
2477 | */ |
2478 | int tb_switch_configure(struct tb_switch *sw) |
2479 | { |
2480 | struct tb *tb = sw->tb; |
2481 | u64 route; |
2482 | int ret; |
2483 | |
2484 | route = tb_route(sw); |
2485 | |
2486 | tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n" , |
2487 | sw->config.enabled ? "restoring" : "initializing" , route, |
2488 | tb_route_length(route), sw->config.upstream_port_number); |
2489 | |
2490 | sw->config.enabled = 1; |
2491 | |
2492 | if (tb_switch_is_usb4(sw)) { |
2493 | /* |
2494 | * For USB4 devices, we need to program the CM version |
2495 | * accordingly so that it knows to expose all the |
2496 | * additional capabilities. Program it according to USB4 |
		 * version to avoid changing the behaviour of existing (v1) routers.
2498 | */ |
2499 | if (usb4_switch_version(sw) < 2) |
2500 | sw->config.cmuv = ROUTER_CS_4_CMUV_V1; |
2501 | else |
2502 | sw->config.cmuv = ROUTER_CS_4_CMUV_V2; |
2503 | sw->config.plug_events_delay = 0xa; |
2504 | |
2505 | /* Enumerate the switch */ |
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
2508 | if (ret) |
2509 | return ret; |
2510 | |
2511 | ret = usb4_switch_setup(sw); |
2512 | } else { |
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2519 | return -ENODEV; |
2520 | } |
2521 | |
2522 | /* Enumerate the switch */ |
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
2525 | } |
2526 | if (ret) |
2527 | return ret; |
2528 | |
	return tb_plug_events_active(sw, true);
2530 | } |
2531 | |
2532 | /** |
2533 | * tb_switch_configuration_valid() - Set the tunneling configuration to be valid |
2534 | * @sw: Router to configure |
2535 | * |
 * Needs to be called before any tunnels can be set up through the
 * router. Can be called for any router.
 *
 * Returns %0 on success and negative errno otherwise.
2540 | */ |
2541 | int tb_switch_configuration_valid(struct tb_switch *sw) |
2542 | { |
2543 | if (tb_switch_is_usb4(sw)) |
2544 | return usb4_switch_configuration_valid(sw); |
2545 | return 0; |
2546 | } |
2547 | |
2548 | static int tb_switch_set_uuid(struct tb_switch *sw) |
2549 | { |
2550 | bool uid = false; |
2551 | u32 uuid[4]; |
2552 | int ret; |
2553 | |
2554 | if (sw->uuid) |
2555 | return 0; |
2556 | |
2557 | if (tb_switch_is_usb4(sw)) { |
		ret = usb4_switch_read_uid(sw, &sw->uid);
2559 | if (ret) |
2560 | return ret; |
2561 | uid = true; |
2562 | } else { |
2563 | /* |
2564 | * The newer controllers include fused UUID as part of |
2565 | * link controller specific registers |
2566 | */ |
2567 | ret = tb_lc_read_uuid(sw, uuid); |
2568 | if (ret) { |
2569 | if (ret != -EINVAL) |
2570 | return ret; |
2571 | uid = true; |
2572 | } |
2573 | } |
2574 | |
2575 | if (uid) { |
2576 | /* |
2577 | * ICM generates UUID based on UID and fills the upper |
2578 | * two words with ones. This is not strictly following |
2579 | * UUID format but we want to be compatible with it so |
2580 | * we do the same here. |
2581 | */ |
2582 | uuid[0] = sw->uid & 0xffffffff; |
2583 | uuid[1] = (sw->uid >> 32) & 0xffffffff; |
2584 | uuid[2] = 0xffffffff; |
2585 | uuid[3] = 0xffffffff; |
2586 | } |
2587 | |
	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2589 | if (!sw->uuid) |
2590 | return -ENOMEM; |
2591 | return 0; |
2592 | } |
2593 | |
2594 | static int tb_switch_add_dma_port(struct tb_switch *sw) |
2595 | { |
2596 | u32 status; |
2597 | int ret; |
2598 | |
2599 | switch (sw->generation) { |
2600 | case 2: |
2601 | /* Only root switch can be upgraded */ |
2602 | if (tb_route(sw)) |
2603 | return 0; |
2604 | |
2605 | fallthrough; |
2606 | case 3: |
2607 | case 4: |
2608 | ret = tb_switch_set_uuid(sw); |
2609 | if (ret) |
2610 | return ret; |
2611 | break; |
2612 | |
2613 | default: |
2614 | /* |
2615 | * DMA port is the only thing available when the switch |
2616 | * is in safe mode. |
2617 | */ |
2618 | if (!sw->safe_mode) |
2619 | return 0; |
2620 | break; |
2621 | } |
2622 | |
2623 | if (sw->no_nvm_upgrade) |
2624 | return 0; |
2625 | |
2626 | if (tb_switch_is_usb4(sw)) { |
		ret = usb4_switch_nvm_authenticate_status(sw, &status);
2628 | if (ret) |
2629 | return ret; |
2630 | |
2631 | if (status) { |
2632 | tb_sw_info(sw, "switch flash authentication failed\n" ); |
2633 | nvm_set_auth_status(sw, status); |
2634 | } |
2635 | |
2636 | return 0; |
2637 | } |
2638 | |
2639 | /* Root switch DMA port requires running firmware */ |
2640 | if (!tb_route(sw) && !tb_switch_is_icm(sw)) |
2641 | return 0; |
2642 | |
2643 | sw->dma_port = dma_port_alloc(sw); |
2644 | if (!sw->dma_port) |
2645 | return 0; |
2646 | |
2647 | /* |
2648 | * If there is status already set then authentication failed |
2649 | * when the dma_port_flash_update_auth() returned. Power cycling |
	 * is not needed (it was done already) so the only thing we do here
2651 | * is to unblock runtime PM of the root port. |
2652 | */ |
	nvm_get_auth_status(sw, &status);
2654 | if (status) { |
2655 | if (!tb_route(sw)) |
2656 | nvm_authenticate_complete_dma_port(sw); |
2657 | return 0; |
2658 | } |
2659 | |
2660 | /* |
2661 | * Check status of the previous flash authentication. If there |
2662 | * is one we need to power cycle the switch in any case to make |
2663 | * it functional again. |
2664 | */ |
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2666 | if (ret <= 0) |
2667 | return ret; |
2668 | |
2669 | /* Now we can allow root port to suspend again */ |
2670 | if (!tb_route(sw)) |
2671 | nvm_authenticate_complete_dma_port(sw); |
2672 | |
2673 | if (status) { |
2674 | tb_sw_info(sw, "switch flash authentication failed\n" ); |
2675 | nvm_set_auth_status(sw, status); |
2676 | } |
2677 | |
2678 | tb_sw_info(sw, "power cycling the switch now\n" ); |
2679 | dma_port_power_cycle(dma: sw->dma_port); |
2680 | |
2681 | /* |
	 * We return an error here, which causes adding the switch to fail.
	 * It should appear back after the power cycle is complete.
2684 | */ |
2685 | return -ESHUTDOWN; |
2686 | } |
2687 | |
2688 | static void tb_switch_default_link_ports(struct tb_switch *sw) |
2689 | { |
2690 | int i; |
2691 | |
2692 | for (i = 1; i <= sw->config.max_port_number; i++) { |
2693 | struct tb_port *port = &sw->ports[i]; |
2694 | struct tb_port *subordinate; |
2695 | |
2696 | if (!tb_port_is_null(port)) |
2697 | continue; |
2698 | |
2699 | /* Check for the subordinate port */ |
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
2702 | continue; |
2703 | |
2704 | /* Link them if not already done so (by DROM) */ |
2705 | subordinate = &sw->ports[i + 1]; |
2706 | if (!port->dual_link_port && !subordinate->dual_link_port) { |
2707 | port->link_nr = 0; |
2708 | port->dual_link_port = subordinate; |
2709 | subordinate->link_nr = 1; |
2710 | subordinate->dual_link_port = port; |
2711 | |
2712 | tb_sw_dbg(sw, "linked ports %d <-> %d\n" , |
2713 | port->port, subordinate->port); |
2714 | } |
2715 | } |
2716 | } |
2717 | |
2718 | static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) |
2719 | { |
2720 | const struct tb_port *up = tb_upstream_port(sw); |
2721 | |
2722 | if (!up->dual_link_port || !up->dual_link_port->remote) |
2723 | return false; |
2724 | |
2725 | if (tb_switch_is_usb4(sw)) |
2726 | return usb4_switch_lane_bonding_possible(sw); |
2727 | return tb_lc_lane_bonding_possible(sw); |
2728 | } |
2729 | |
2730 | static int tb_switch_update_link_attributes(struct tb_switch *sw) |
2731 | { |
2732 | struct tb_port *up; |
2733 | bool change = false; |
2734 | int ret; |
2735 | |
2736 | if (!tb_route(sw) || tb_switch_is_icm(sw)) |
2737 | return 0; |
2738 | |
2739 | up = tb_upstream_port(sw); |
2740 | |
	ret = tb_port_get_link_speed(up);
2742 | if (ret < 0) |
2743 | return ret; |
2744 | if (sw->link_speed != ret) |
2745 | change = true; |
2746 | sw->link_speed = ret; |
2747 | |
	ret = tb_port_get_link_width(up);
2749 | if (ret < 0) |
2750 | return ret; |
2751 | if (sw->link_width != ret) |
2752 | change = true; |
2753 | sw->link_width = ret; |
2754 | |
2755 | /* Notify userspace that there is possible link attribute change */ |
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2758 | |
2759 | return 0; |
2760 | } |
2761 | |
2762 | /* Must be called after tb_switch_update_link_attributes() */ |
2763 | static void tb_switch_link_init(struct tb_switch *sw) |
2764 | { |
2765 | struct tb_port *up, *down; |
2766 | bool bonded; |
2767 | |
2768 | if (!tb_route(sw) || tb_switch_is_icm(sw)) |
2769 | return; |
2770 | |
2771 | tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n" , sw->link_speed); |
2772 | tb_sw_dbg(sw, "current link width %s\n" , width_name(sw->link_width)); |
2773 | |
2774 | bonded = sw->link_width >= TB_LINK_WIDTH_DUAL; |
2775 | |
2776 | /* |
2777 | * Gen 4 links come up as bonded so update the port structures |
2778 | * accordingly. |
2779 | */ |
2780 | up = tb_upstream_port(sw); |
2781 | down = tb_switch_downstream_port(sw); |
2782 | |
2783 | up->bonded = bonded; |
2784 | if (up->dual_link_port) |
2785 | up->dual_link_port->bonded = bonded; |
	tb_port_update_credits(up);
2787 | |
2788 | down->bonded = bonded; |
2789 | if (down->dual_link_port) |
2790 | down->dual_link_port->bonded = bonded; |
	tb_port_update_credits(down);
2792 | } |
2793 | |
2794 | /** |
2795 | * tb_switch_lane_bonding_enable() - Enable lane bonding |
2796 | * @sw: Switch to enable lane bonding |
2797 | * |
2798 | * Connection manager can call this function to enable lane bonding of a |
2799 | * switch. If conditions are correct and both switches support the feature, |
 * lanes are bonded. It is safe to call this for any switch.
2801 | */ |
2802 | static int tb_switch_lane_bonding_enable(struct tb_switch *sw) |
2803 | { |
2804 | struct tb_port *up, *down; |
2805 | unsigned int width; |
2806 | int ret; |
2807 | |
2808 | if (!tb_switch_lane_bonding_possible(sw)) |
2809 | return 0; |
2810 | |
2811 | up = tb_upstream_port(sw); |
2812 | down = tb_switch_downstream_port(sw); |
2813 | |
	if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) ||
	    !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL))
2816 | return 0; |
2817 | |
2818 | /* |
2819 | * Both lanes need to be in CL0. Here we assume lane 0 already be in |
2820 | * CL0 and check just for lane 1. |
2821 | */ |
2822 | if (tb_wait_for_port(port: down->dual_link_port, wait_if_unplugged: false) <= 0) |
2823 | return -ENOTCONN; |
2824 | |
	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
2835 | return ret; |
2836 | } |
2837 | |
	/* Any of these widths means the link is bonded */
2839 | width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX | |
2840 | TB_LINK_WIDTH_ASYM_RX; |
2841 | |
	return tb_port_wait_for_link_width(down, width, 100);
2843 | } |
2844 | |
2845 | /** |
2846 | * tb_switch_lane_bonding_disable() - Disable lane bonding |
2847 | * @sw: Switch whose lane bonding to disable |
2848 | * |
2849 | * Disables lane bonding between @sw and parent. This can be called even |
2850 | * if lanes were not bonded originally. |
2851 | */ |
2852 | static int tb_switch_lane_bonding_disable(struct tb_switch *sw) |
2853 | { |
2854 | struct tb_port *up, *down; |
2855 | int ret; |
2856 | |
2857 | up = tb_upstream_port(sw); |
2858 | if (!up->bonded) |
2859 | return 0; |
2860 | |
2861 | /* |
2862 | * If the link is Gen 4 there is no way to switch the link to |
2863 | * two single lane links so avoid that here. Also don't bother |
2864 | * if the link is not up anymore (sw is unplugged). |
2865 | */ |
	ret = tb_port_get_link_generation(up);
2867 | if (ret < 0) |
2868 | return ret; |
2869 | if (ret >= 4) |
2870 | return -EOPNOTSUPP; |
2871 | |
2872 | down = tb_switch_downstream_port(sw); |
	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);
2875 | |
2876 | /* |
2877 | * It is fine if we get other errors as the router might have |
2878 | * been unplugged. |
2879 | */ |
	return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
2881 | } |
2882 | |
2883 | static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width) |
2884 | { |
2885 | struct tb_port *up, *down, *port; |
2886 | enum tb_link_width down_width; |
2887 | int ret; |
2888 | |
2889 | up = tb_upstream_port(sw); |
2890 | down = tb_switch_downstream_port(sw); |
2891 | |
2892 | if (width == TB_LINK_WIDTH_ASYM_TX) { |
2893 | down_width = TB_LINK_WIDTH_ASYM_RX; |
2894 | port = down; |
2895 | } else { |
2896 | down_width = TB_LINK_WIDTH_ASYM_TX; |
2897 | port = up; |
2898 | } |
2899 | |
	ret = tb_port_set_link_width(up, width);
2901 | if (ret) |
2902 | return ret; |
2903 | |
	ret = tb_port_set_link_width(down, down_width);
2905 | if (ret) |
2906 | return ret; |
2907 | |
2908 | /* |
2909 | * Initiate the change in the router that one of its TX lanes is |
2910 | * changing to RX but do so only if there is an actual change. |
2911 | */ |
2912 | if (sw->link_width != width) { |
2913 | ret = usb4_port_asym_start(port); |
2914 | if (ret) |
2915 | return ret; |
2916 | |
		ret = tb_port_wait_for_link_width(up, width, 100);
2918 | if (ret) |
2919 | return ret; |
2920 | } |
2921 | |
2922 | sw->link_width = width; |
2923 | return 0; |
2924 | } |
2925 | |
2926 | static int tb_switch_asym_disable(struct tb_switch *sw) |
2927 | { |
2928 | struct tb_port *up, *down; |
2929 | int ret; |
2930 | |
2931 | up = tb_upstream_port(sw); |
2932 | down = tb_switch_downstream_port(sw); |
2933 | |
	ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL);
2935 | if (ret) |
2936 | return ret; |
2937 | |
	ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL);
2939 | if (ret) |
2940 | return ret; |
2941 | |
2942 | /* |
2943 | * Initiate the change in the router that has three TX lanes and |
2944 | * is changing one of its TX lanes to RX but only if there is a |
2945 | * change in the link width. |
2946 | */ |
2947 | if (sw->link_width > TB_LINK_WIDTH_DUAL) { |
2948 | if (sw->link_width == TB_LINK_WIDTH_ASYM_TX) |
			ret = usb4_port_asym_start(up);
		else
			ret = usb4_port_asym_start(down);
2952 | if (ret) |
2953 | return ret; |
2954 | |
		ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
2956 | if (ret) |
2957 | return ret; |
2958 | } |
2959 | |
2960 | sw->link_width = TB_LINK_WIDTH_DUAL; |
2961 | return 0; |
2962 | } |
2963 | |
2964 | /** |
2965 | * tb_switch_set_link_width() - Configure router link width |
2966 | * @sw: Router to configure |
2967 | * @width: The new link width |
2968 | * |
 * Set device router link width to @width from router upstream port
 * perspective. Supports also asymmetric links if both sides of the
 * link support it.
2972 | * |
2973 | * Does nothing for host router. |
2974 | * |
2975 | * Returns %0 in case of success, negative errno otherwise. |
2976 | */ |
2977 | int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width) |
2978 | { |
2979 | struct tb_port *up, *down; |
2980 | int ret = 0; |
2981 | |
2982 | if (!tb_route(sw)) |
2983 | return 0; |
2984 | |
2985 | up = tb_upstream_port(sw); |
2986 | down = tb_switch_downstream_port(sw); |
2987 | |
2988 | switch (width) { |
2989 | case TB_LINK_WIDTH_SINGLE: |
2990 | ret = tb_switch_lane_bonding_disable(sw); |
2991 | break; |
2992 | |
2993 | case TB_LINK_WIDTH_DUAL: |
2994 | if (sw->link_width == TB_LINK_WIDTH_ASYM_TX || |
2995 | sw->link_width == TB_LINK_WIDTH_ASYM_RX) { |
2996 | ret = tb_switch_asym_disable(sw); |
2997 | if (ret) |
2998 | break; |
2999 | } |
3000 | ret = tb_switch_lane_bonding_enable(sw); |
3001 | break; |
3002 | |
3003 | case TB_LINK_WIDTH_ASYM_TX: |
3004 | case TB_LINK_WIDTH_ASYM_RX: |
3005 | ret = tb_switch_asym_enable(sw, width); |
3006 | break; |
3007 | } |
3008 | |
3009 | switch (ret) { |
3010 | case 0: |
3011 | break; |
3012 | |
3013 | case -ETIMEDOUT: |
3014 | tb_sw_warn(sw, "timeout changing link width\n" ); |
3015 | return ret; |
3016 | |
3017 | case -ENOTCONN: |
3018 | case -EOPNOTSUPP: |
3019 | case -ENODEV: |
3020 | return ret; |
3021 | |
3022 | default: |
3023 | tb_sw_dbg(sw, "failed to change link width: %d\n" , ret); |
3024 | return ret; |
3025 | } |
3026 | |
	tb_port_update_credits(down);
	tb_port_update_credits(up);
3029 | |
3030 | tb_switch_update_link_attributes(sw); |
3031 | |
3032 | tb_sw_dbg(sw, "link width set to %s\n" , width_name(width)); |
3033 | return ret; |
3034 | } |
3035 | |
3036 | /** |
3037 | * tb_switch_configure_link() - Set link configured |
3038 | * @sw: Switch whose link is configured |
3039 | * |
3040 | * Sets the link upstream from @sw configured (from both ends) so that |
3041 | * it will not be disconnected when the domain exits sleep. Can be |
3042 | * called for any switch. |
3043 | * |
3044 | * It is recommended that this is called after lane bonding is enabled. |
3045 | * |
3046 | * Returns %0 on success and negative errno in case of error. |
3047 | */ |
3048 | int tb_switch_configure_link(struct tb_switch *sw) |
3049 | { |
3050 | struct tb_port *up, *down; |
3051 | int ret; |
3052 | |
3053 | if (!tb_route(sw) || tb_switch_is_icm(sw)) |
3054 | return 0; |
3055 | |
3056 | up = tb_upstream_port(sw); |
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
3061 | if (ret) |
3062 | return ret; |
3063 | |
3064 | down = up->remote; |
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
3068 | } |
3069 | |
3070 | /** |
3071 | * tb_switch_unconfigure_link() - Unconfigure link |
3072 | * @sw: Switch whose link is unconfigured |
3073 | * |
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
3076 | */ |
3077 | void tb_switch_unconfigure_link(struct tb_switch *sw) |
3078 | { |
3079 | struct tb_port *up, *down; |
3080 | |
3081 | if (sw->is_unplugged) |
3082 | return; |
3083 | if (!tb_route(sw) || tb_switch_is_icm(sw)) |
3084 | return; |
3085 | |
3086 | up = tb_upstream_port(sw); |
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
3097 | } |
3098 | |
3099 | static void tb_switch_credits_init(struct tb_switch *sw) |
3100 | { |
3101 | if (tb_switch_is_icm(sw)) |
3102 | return; |
3103 | if (!tb_switch_is_usb4(sw)) |
3104 | return; |
3105 | if (usb4_switch_credits_init(sw)) |
3106 | tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n" ); |
3107 | } |
3108 | |
3109 | static int tb_switch_port_hotplug_enable(struct tb_switch *sw) |
3110 | { |
3111 | struct tb_port *port; |
3112 | |
3113 | if (tb_switch_is_icm(sw)) |
3114 | return 0; |
3115 | |
3116 | tb_switch_for_each_port(sw, port) { |
3117 | int res; |
3118 | |
3119 | if (!port->cap_usb4) |
3120 | continue; |
3121 | |
3122 | res = usb4_port_hotplug_enable(port); |
3123 | if (res) |
3124 | return res; |
3125 | } |
3126 | return 0; |
3127 | } |
3128 | |
3129 | /** |
3130 | * tb_switch_add() - Add a switch to the domain |
3131 | * @sw: Switch to add |
3132 | * |
 * This is the last step in adding a switch to the domain. It will read
 * identification information from DROM and initialize ports so that
3135 | * they can be used to connect other switches. The switch will be |
3136 | * exposed to the userspace when this function successfully returns. To |
3137 | * remove and release the switch, call tb_switch_remove(). |
3138 | * |
3139 | * Return: %0 in case of success and negative errno in case of failure |
3140 | */ |
3141 | int tb_switch_add(struct tb_switch *sw) |
3142 | { |
3143 | int i, ret; |
3144 | |
3145 | /* |
3146 | * Initialize DMA control port now before we read DROM. Recent |
3147 | * host controllers have more complete DROM on NVM that includes |
3148 | * vendor and model identification strings which we then expose |
3149 | * to the userspace. NVM can be accessed through DMA |
3150 | * configuration based mailbox. |
3151 | */ |
3152 | ret = tb_switch_add_dma_port(sw); |
3153 | if (ret) { |
3154 | dev_err(&sw->dev, "failed to add DMA port\n" ); |
3155 | return ret; |
3156 | } |
3157 | |
3158 | if (!sw->safe_mode) { |
3159 | tb_switch_credits_init(sw); |
3160 | |
3161 | /* read drom */ |
3162 | ret = tb_drom_read(sw); |
3163 | if (ret) |
			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
3166 | |
3167 | ret = tb_switch_set_uuid(sw); |
3168 | if (ret) { |
3169 | dev_err(&sw->dev, "failed to set UUID\n" ); |
3170 | return ret; |
3171 | } |
3172 | |
3173 | for (i = 0; i <= sw->config.max_port_number; i++) { |
3174 | if (sw->ports[i].disabled) { |
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
3176 | continue; |
3177 | } |
			ret = tb_init_port(&sw->ports[i]);
3179 | if (ret) { |
3180 | dev_err(&sw->dev, "failed to initialize port %d\n" , i); |
3181 | return ret; |
3182 | } |
3183 | } |
3184 | |
3185 | tb_check_quirks(sw); |
3186 | |
3187 | tb_switch_default_link_ports(sw); |
3188 | |
3189 | ret = tb_switch_update_link_attributes(sw); |
3190 | if (ret) |
3191 | return ret; |
3192 | |
3193 | tb_switch_link_init(sw); |
3194 | |
3195 | ret = tb_switch_clx_init(sw); |
3196 | if (ret) |
3197 | return ret; |
3198 | |
3199 | ret = tb_switch_tmu_init(sw); |
3200 | if (ret) |
3201 | return ret; |
3202 | } |
3203 | |
3204 | ret = tb_switch_port_hotplug_enable(sw); |
3205 | if (ret) |
3206 | return ret; |
3207 | |
	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
3211 | return ret; |
3212 | } |
3213 | |
3214 | if (tb_route(sw)) { |
3215 | dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n" , |
3216 | sw->vendor, sw->device); |
3217 | if (sw->vendor_name && sw->device_name) |
3218 | dev_info(&sw->dev, "%s %s\n" , sw->vendor_name, |
3219 | sw->device_name); |
3220 | } |
3221 | |
3222 | ret = usb4_switch_add_ports(sw); |
3223 | if (ret) { |
3224 | dev_err(&sw->dev, "failed to add USB4 ports\n" ); |
3225 | goto err_del; |
3226 | } |
3227 | |
3228 | ret = tb_switch_nvm_add(sw); |
3229 | if (ret) { |
3230 | dev_err(&sw->dev, "failed to add NVM devices\n" ); |
3231 | goto err_ports; |
3232 | } |
3233 | |
3234 | /* |
3235 | * Thunderbolt routers do not generate wakeups themselves but |
3236 | * they forward wakeups from tunneled protocols, so enable it |
3237 | * here. |
3238 | */ |
	device_init_wakeup(&sw->dev, true);
3240 | |
	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
3248 | } |
3249 | |
3250 | tb_switch_debugfs_init(sw); |
3251 | return 0; |
3252 | |
3253 | err_ports: |
3254 | usb4_switch_remove_ports(sw); |
3255 | err_del: |
	device_del(&sw->dev);
3257 | |
3258 | return ret; |
3259 | } |
3260 | |
3261 | /** |
3262 | * tb_switch_remove() - Remove and release a switch |
3263 | * @sw: Switch to remove |
3264 | * |
 * This will remove the switch from the domain and release it after the
 * last reference drops to zero. If there are switches connected below
3267 | * this switch, they will be removed as well. |
3268 | */ |
3269 | void tb_switch_remove(struct tb_switch *sw) |
3270 | { |
3271 | struct tb_port *port; |
3272 | |
3273 | tb_switch_debugfs_remove(sw); |
3274 | |
3275 | if (sw->rpm) { |
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
3278 | } |
3279 | |
3280 | /* port 0 is the switch itself and never has a remote */ |
3281 | tb_switch_for_each_port(sw, port) { |
3282 | if (tb_port_has_remote(port)) { |
			tb_switch_remove(port->remote->sw);
3284 | port->remote = NULL; |
3285 | } else if (port->xdomain) { |
			tb_xdomain_remove(port->xdomain);
3287 | port->xdomain = NULL; |
3288 | } |
3289 | |
3290 | /* Remove any downstream retimers */ |
3291 | tb_retimer_remove_all(port); |
3292 | } |
3293 | |
3294 | if (!sw->is_unplugged) |
		tb_plug_events_active(sw, false);
3296 | |
3297 | tb_switch_nvm_remove(sw); |
3298 | usb4_switch_remove_ports(sw); |
3299 | |
3300 | if (tb_route(sw)) |
3301 | dev_info(&sw->dev, "device disconnected\n" ); |
3302 | device_unregister(dev: &sw->dev); |
3303 | } |
3304 | |
3305 | /** |
3306 | * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches |
3307 | * @sw: Router to mark unplugged |
3308 | */ |
3309 | void tb_sw_set_unplugged(struct tb_switch *sw) |
3310 | { |
3311 | struct tb_port *port; |
3312 | |
3313 | if (sw == sw->tb->root_switch) { |
3314 | tb_sw_WARN(sw, "cannot unplug root switch\n" ); |
3315 | return; |
3316 | } |
3317 | if (sw->is_unplugged) { |
3318 | tb_sw_WARN(sw, "is_unplugged already set\n" ); |
3319 | return; |
3320 | } |
3321 | sw->is_unplugged = true; |
3322 | tb_switch_for_each_port(sw, port) { |
3323 | if (tb_port_has_remote(port)) |
			tb_sw_set_unplugged(port->remote->sw);
3325 | else if (port->xdomain) |
3326 | port->xdomain->is_unplugged = true; |
3327 | } |
3328 | } |
3329 | |
3330 | static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) |
3331 | { |
3332 | if (flags) |
3333 | tb_sw_dbg(sw, "enabling wakeup: %#x\n" , flags); |
3334 | else |
3335 | tb_sw_dbg(sw, "disabling wakeup\n" ); |
3336 | |
3337 | if (tb_switch_is_usb4(sw)) |
3338 | return usb4_switch_set_wake(sw, flags); |
3339 | return tb_lc_set_wake(sw, flags); |
3340 | } |
3341 | |
3342 | int tb_switch_resume(struct tb_switch *sw) |
3343 | { |
3344 | struct tb_port *port; |
3345 | int err; |
3346 | |
3347 | tb_sw_dbg(sw, "resuming switch\n" ); |
3348 | |
3349 | /* |
3350 | * Check for UID of the connected switches except for root |
3351 | * switch which we assume cannot be removed. |
3352 | */ |
3353 | if (tb_route(sw)) { |
3354 | u64 uid; |
3355 | |
3356 | /* |
3357 | * Check first that we can still read the switch config |
3358 | * space. It may be that there is now another domain |
3359 | * connected. |
3360 | */ |
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
3364 | return err; |
3365 | } |
3366 | |
3367 | /* We don't have any way to confirm this was the same device */ |
3368 | if (!sw->uid) |
3369 | return -ENODEV; |
3370 | |
		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				   "changed while suspended (uid %#llx -> %#llx)\n",
				   sw->uid, uid);
3383 | return -ENODEV; |
3384 | } |
3385 | } |
3386 | |
3387 | err = tb_switch_configure(sw); |
3388 | if (err) |
3389 | return err; |
3390 | |
3391 | /* Disable wakes */ |
	tb_switch_set_wake(sw, 0);
3393 | |
3394 | err = tb_switch_tmu_init(sw); |
3395 | if (err) |
3396 | return err; |
3397 | |
3398 | /* check for surviving downstream switches */ |
3399 | tb_switch_for_each_port(sw, port) { |
3400 | if (!tb_port_is_null(port)) |
3401 | continue; |
3402 | |
3403 | if (!tb_port_resume(port)) |
3404 | continue; |
3405 | |
		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
3413 | } else { |
3414 | /* |
3415 | * Always unlock the port so the downstream |
3416 | * switch/domain is accessible. |
3417 | */ |
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
3424 | } |
3425 | } |
3426 | } |
3427 | return 0; |
3428 | } |
3429 | |
3430 | /** |
3431 | * tb_switch_suspend() - Put a switch to sleep |
3432 | * @sw: Switch to suspend |
3433 | * @runtime: Is this runtime suspend or system sleep |
3434 | * |
3435 | * Suspends router and all its children. Enables wakes according to |
3436 | * value of @runtime and then sets sleep bit for the router. If @sw is |
3437 | * host router the domain is ready to go to sleep once this function |
3438 | * returns. |
3439 | */ |
3440 | void tb_switch_suspend(struct tb_switch *sw, bool runtime) |
3441 | { |
3442 | unsigned int flags = 0; |
3443 | struct tb_port *port; |
3444 | int err; |
3445 | |
3446 | tb_sw_dbg(sw, "suspending switch\n" ); |
3447 | |
3448 | /* |
3449 | * Actually only needed for Titan Ridge but for simplicity can be |
3450 | * done for USB4 device too as CLx is re-enabled at resume. |
3451 | */ |
3452 | tb_switch_clx_disable(sw); |
3453 | |
	err = tb_plug_events_active(sw, false);
3455 | if (err) |
3456 | return; |
3457 | |
3458 | tb_switch_for_each_port(sw, port) { |
3459 | if (tb_port_has_remote(port)) |
			tb_switch_suspend(port->remote->sw, runtime);
3461 | } |
3462 | |
3463 | if (runtime) { |
3464 | /* Trigger wake when something is plugged in/out */ |
3465 | flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT; |
3466 | flags |= TB_WAKE_ON_USB4; |
3467 | flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP; |
	} else if (device_may_wakeup(&sw->dev)) {
3469 | flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; |
3470 | } |
3471 | |
3472 | tb_switch_set_wake(sw, flags); |
3473 | |
3474 | if (tb_switch_is_usb4(sw)) |
3475 | usb4_switch_set_sleep(sw); |
3476 | else |
3477 | tb_lc_set_sleep(sw); |
3478 | } |
3479 | |
3480 | /** |
3481 | * tb_switch_query_dp_resource() - Query availability of DP resource |
3482 | * @sw: Switch whose DP resource is queried |
3483 | * @in: DP IN port |
3484 | * |
3485 | * Queries availability of DP resource for DP tunneling using switch |
3486 | * specific means. Returns %true if resource is available. |
3487 | */ |
3488 | bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) |
3489 | { |
3490 | if (tb_switch_is_usb4(sw)) |
3491 | return usb4_switch_query_dp_resource(sw, in); |
3492 | return tb_lc_dp_sink_query(sw, in); |
3493 | } |
3494 | |
3495 | /** |
3496 | * tb_switch_alloc_dp_resource() - Allocate available DP resource |
3497 | * @sw: Switch whose DP resource is allocated |
3498 | * @in: DP IN port |
3499 | * |
3500 | * Allocates DP resource for DP tunneling. The resource must be |
3501 | * available for this to succeed (see tb_switch_query_dp_resource()). |
 * Returns %0 on success and negative errno otherwise.
3503 | */ |
3504 | int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) |
3505 | { |
3506 | int ret; |
3507 | |
3508 | if (tb_switch_is_usb4(sw)) |
3509 | ret = usb4_switch_alloc_dp_resource(sw, in); |
3510 | else |
3511 | ret = tb_lc_dp_sink_alloc(sw, in); |
3512 | |
3513 | if (ret) |
3514 | tb_sw_warn(sw, "failed to allocate DP resource for port %d\n" , |
3515 | in->port); |
3516 | else |
3517 | tb_sw_dbg(sw, "allocated DP resource for port %d\n" , in->port); |
3518 | |
3519 | return ret; |
3520 | } |
3521 | |
3522 | /** |
3523 | * tb_switch_dealloc_dp_resource() - De-allocate DP resource |
3524 | * @sw: Switch whose DP resource is de-allocated |
3525 | * @in: DP IN port |
3526 | * |
3527 | * De-allocates DP resource that was previously allocated for DP |
3528 | * tunneling. |
3529 | */ |
3530 | void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) |
3531 | { |
3532 | int ret; |
3533 | |
3534 | if (tb_switch_is_usb4(sw)) |
3535 | ret = usb4_switch_dealloc_dp_resource(sw, in); |
3536 | else |
3537 | ret = tb_lc_dp_sink_dealloc(sw, in); |
3538 | |
3539 | if (ret) |
3540 | tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n" , |
3541 | in->port); |
3542 | else |
3543 | tb_sw_dbg(sw, "released DP resource for port %d\n" , in->port); |
3544 | } |

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

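	/* Match precedence: UUID first, then route, then link/depth */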
	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * The returned switch has its reference count increased, so the caller
 * needs to call tb_switch_put() when done with it.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
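
/*
 * Illustrative sketch only (hypothetical caller): every successful
 * lookup must be balanced with tb_switch_put() once the reference is
 * no longer needed.
 */
static void __maybe_unused example_find_and_put(struct tb *tb)
{
	struct tb_switch *sw;

	/* First hop router on link 1 */
	sw = tb_switch_find_by_link_depth(tb, 1, 1);
	if (sw) {
		tb_sw_dbg(sw, "found first hop router\n");
		tb_switch_put(sw);
	}
}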

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * The returned switch has its reference count increased, so the caller
 * needs to call tb_switch_put() when done with it.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * The returned switch has its reference count increased, so the caller
 * needs to call tb_switch_put() when done with it.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
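
/*
 * Illustrative sketch only (hypothetical caller): route 0 always
 * resolves to the domain's root switch, with its reference taken.
 */
static __maybe_unused struct tb_switch *example_get_root(struct tb *tb)
{
	/* Caller must drop the reference with tb_switch_put() */
	return tb_switch_find_by_route(tb, 0);
}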

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}
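
/*
 * Illustrative sketch only (hypothetical caller): find the first PCIe
 * upstream adapter of a router, if it has one.
 */
static __maybe_unused struct tb_port *example_find_pcie_up(struct tb_switch *sw)
{
	return tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
}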

/*
 * Can be used to read from/write to a specified PCIe bridge of any
 * Thunderbolt 3 device. For now used only for Titan Ridge.
 */
static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
				       unsigned int pcie_offset, u32 value)
{
	u32 offset, command, val;
	int ret;

	if (sw->generation != 3)
		return -EOPNOTSUPP;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

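	/* Build the command word: target register, bridge, and write op */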
	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
			<< TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;

	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

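	/* Wait for the router to clear the request/ack bit */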
	ret = tb_switch_wait_for_bit(sw, offset,
				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
		return -ETIMEDOUT;

	return 0;
}

/**
 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
 * @sw: Router to enable PCIe L1
 *
 * For a Titan Ridge switch to enter the CLx state, its PCIe bridges
 * must enable entry to the PCIe L1 state. Must be called after the
 * upstream PCIe tunnel has been configured. Due to an Intel platform
 * limitation, must be called only for the first hop switch.
 */
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret;

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	/* Enable PCIe L1 only for the first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	/* Write to downstream PCIe bridge #5 aka Dn4 */
	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
	if (ret)
		return ret;

	/* Write to upstream PCIe bridge #0 aka Up0 */
	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
}
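
/*
 * Illustrative sketch only (hypothetical caller): the connection
 * manager would call this right after activating the PCIe tunnel to
 * the first hop router. The tunnel setup step is elided here.
 */
static int __maybe_unused example_enable_l1_after_tunnel(struct tb_switch *sw)
{
	/* ...PCIe tunnel to @sw activated above... */
	return tb_switch_pcie_l1_enable(sw);
}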

/**
 * tb_switch_xhci_connect() - Connect internal xHCI
 * @sw: Router whose xHCI to connect
 *
 * Can be called for any router. For Alpine Ridge and Titan Ridge this
 * performs the special flows that bring the xHCI functional for any
 * device connected to the Type-C port. Call only after the PCIe tunnel
 * has been established. The function only does the connect if it has
 * not been done already, so it can be called several times for the
 * same router.
 */
int tb_switch_xhci_connect(struct tb_switch *sw)
{
	struct tb_port *port1, *port3;
	int ret;

	if (sw->generation != 3)
		return 0;

	port1 = &sw->ports[1];
	port3 = &sw->ports[3];

	if (tb_switch_is_alpine_ridge(sw)) {
		bool usb_port1, usb_port3, xhci_port1, xhci_port3;

		usb_port1 = tb_lc_is_usb_plugged(port1);
		usb_port3 = tb_lc_is_usb_plugged(port3);
		xhci_port1 = tb_lc_is_xhci_connected(port1);
		xhci_port3 = tb_lc_is_xhci_connected(port3);

		/* Figure out correct USB port to connect */
		if (usb_port1 && !xhci_port1) {
			ret = tb_lc_xhci_connect(port1);
			if (ret)
				return ret;
		}
		if (usb_port3 && !xhci_port3)
			return tb_lc_xhci_connect(port3);
	} else if (tb_switch_is_titan_ridge(sw)) {
		ret = tb_lc_xhci_connect(port1);
		if (ret)
			return ret;
		return tb_lc_xhci_connect(port3);
	}

	return 0;
}

/**
 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
 * @sw: Router whose xHCI to disconnect
 *
 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
 * ports.
 */
void tb_switch_xhci_disconnect(struct tb_switch *sw)
{
	if (sw->generation == 3) {
		struct tb_port *port1 = &sw->ports[1];
		struct tb_port *port3 = &sw->ports[3];

		tb_lc_xhci_disconnect(port1);
		tb_port_dbg(port1, "disconnected xHCI\n");
		tb_lc_xhci_disconnect(port3);
		tb_port_dbg(port3, "disconnected xHCI\n");
	}
}
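
/*
 * Illustrative sketch only (hypothetical hotplug path): connect the
 * internal xHCI once the PCIe tunnel is up and disconnect it again
 * when the tunnel is torn down. Error handling is reduced to a debug
 * message for brevity.
 */
static void __maybe_unused example_xhci_hotplug(struct tb_switch *sw,
						bool tunnel_up)
{
	if (tunnel_up) {
		if (tb_switch_xhci_connect(sw))
			tb_sw_dbg(sw, "failed to connect xHCI\n");
	} else {
		tb_switch_xhci_disconnect(sw);
	}
}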