1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
 * Internal Thunderbolt Connection Manager. This is firmware running on
 * the Thunderbolt host controller that performs most of the low-level
 * handling.
6 | * |
7 | * Copyright (C) 2017, Intel Corporation |
8 | * Authors: Michael Jamet <michael.jamet@intel.com> |
9 | * Mika Westerberg <mika.westerberg@linux.intel.com> |
10 | */ |
11 | |
12 | #include <linux/delay.h> |
13 | #include <linux/mutex.h> |
14 | #include <linux/moduleparam.h> |
15 | #include <linux/pci.h> |
16 | #include <linux/pm_runtime.h> |
17 | #include <linux/platform_data/x86/apple.h> |
18 | #include <linux/sizes.h> |
19 | #include <linux/slab.h> |
20 | #include <linux/workqueue.h> |
21 | |
22 | #include "ctl.h" |
23 | #include "nhi_regs.h" |
24 | #include "tb.h" |
25 | |
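/*
 * PCIe2CIO is a vendor specific mailbox in PCIe config space that lets
 * the driver reach the CIO (converged I/O) register space before the
 * ICM firmware is running. The flow, implemented in pcie2cio_read() and
 * pcie2cio_write() below, is: optionally write PCIE2CIO_WRDATA, compose
 * the config space/port/index into PCIE2CIO_CMD together with
 * PCIE2CIO_CMD_START, then poll until the START bit clears (or the
 * hardware sets the TIMEOUT bit).
 */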
26 | #define PCIE2CIO_CMD 0x30 |
27 | #define PCIE2CIO_CMD_TIMEOUT BIT(31) |
28 | #define PCIE2CIO_CMD_START BIT(30) |
29 | #define PCIE2CIO_CMD_WRITE BIT(21) |
30 | #define PCIE2CIO_CMD_CS_MASK GENMASK(20, 19) |
31 | #define PCIE2CIO_CMD_CS_SHIFT 19 |
32 | #define PCIE2CIO_CMD_PORT_MASK GENMASK(18, 13) |
33 | #define PCIE2CIO_CMD_PORT_SHIFT 13 |
34 | |
35 | #define PCIE2CIO_WRDATA 0x34 |
36 | #define PCIE2CIO_RDDATA 0x38 |
37 | |
38 | #define PHY_PORT_CS1 0x37 |
39 | #define PHY_PORT_CS1_LINK_DISABLE BIT(14) |
40 | #define PHY_PORT_CS1_LINK_STATE_MASK GENMASK(29, 26) |
41 | #define PHY_PORT_CS1_LINK_STATE_SHIFT 26 |
42 | |
43 | #define ICM_TIMEOUT 5000 /* ms */ |
44 | #define ICM_RETRIES 3 |
45 | #define ICM_APPROVE_TIMEOUT 10000 /* ms */ |
46 | #define ICM_MAX_LINK 4 |
47 | |
48 | static bool start_icm; |
49 | module_param(start_icm, bool, 0444); |
MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");
51 | |
52 | /** |
53 | * struct usb4_switch_nvm_auth - Holds USB4 NVM_AUTH status |
54 | * @reply: Reply from ICM firmware is placed here |
55 | * @request: Request that is sent to ICM firmware |
56 | * @icm: Pointer to ICM private data |
57 | */ |
58 | struct usb4_switch_nvm_auth { |
59 | struct icm_usb4_switch_op_response reply; |
60 | struct icm_usb4_switch_op request; |
61 | struct icm *icm; |
62 | }; |
63 | |
64 | /** |
65 | * struct icm - Internal connection manager private data |
 * @request_lock: Makes sure only one message is sent to the ICM at a time
67 | * @rescan_work: Work used to rescan the surviving switches after resume |
 * @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. This is only set for systems
 *		   where the ICM needs to be started manually
71 | * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides |
72 | * (only set when @upstream_port is not %NULL) |
73 | * @safe_mode: ICM is in safe mode |
74 | * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) |
75 | * @rpm: Does the controller support runtime PM (RTD3) |
 * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
77 | * @proto_version: Firmware protocol version |
78 | * @last_nvm_auth: Last USB4 router NVM_AUTH result (or %NULL if not set) |
79 | * @veto: Is RTD3 veto in effect |
80 | * @is_supported: Checks if we can support ICM on this controller |
81 | * @cio_reset: Trigger CIO reset |
82 | * @get_mode: Read and return the ICM firmware mode (optional) |
83 | * @get_route: Find a route string for given switch |
84 | * @save_devices: Ask ICM to save devices to ACL when suspending (optional) |
85 | * @driver_ready: Send driver ready message to ICM |
86 | * @set_uuid: Set UUID for the root switch (optional) |
87 | * @device_connected: Handle device connected ICM message |
88 | * @device_disconnected: Handle device disconnected ICM message |
89 | * @xdomain_connected: Handle XDomain connected ICM message |
90 | * @xdomain_disconnected: Handle XDomain disconnected ICM message |
91 | * @rtd3_veto: Handle RTD3 veto notification ICM message |
92 | */ |
93 | struct icm { |
94 | struct mutex request_lock; |
95 | struct delayed_work rescan_work; |
96 | struct pci_dev *upstream_port; |
97 | int vnd_cap; |
98 | bool safe_mode; |
99 | size_t max_boot_acl; |
100 | bool rpm; |
101 | bool can_upgrade_nvm; |
102 | u8 proto_version; |
103 | struct usb4_switch_nvm_auth *last_nvm_auth; |
104 | bool veto; |
105 | bool (*is_supported)(struct tb *tb); |
106 | int (*cio_reset)(struct tb *tb); |
107 | int (*get_mode)(struct tb *tb); |
108 | int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route); |
109 | void (*save_devices)(struct tb *tb); |
110 | int (*driver_ready)(struct tb *tb, |
111 | enum tb_security_level *security_level, |
112 | u8 *proto_version, size_t *nboot_acl, bool *rpm); |
113 | void (*set_uuid)(struct tb *tb); |
114 | void (*device_connected)(struct tb *tb, |
115 | const struct icm_pkg_header *hdr); |
116 | void (*device_disconnected)(struct tb *tb, |
117 | const struct icm_pkg_header *hdr); |
118 | void (*xdomain_connected)(struct tb *tb, |
119 | const struct icm_pkg_header *hdr); |
120 | void (*xdomain_disconnected)(struct tb *tb, |
121 | const struct icm_pkg_header *hdr); |
122 | void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr); |
123 | }; |
124 | |
125 | struct icm_notification { |
126 | struct work_struct work; |
127 | struct icm_pkg_header *pkg; |
128 | struct tb *tb; |
129 | }; |
130 | |
131 | struct ep_name_entry { |
132 | u8 len; |
133 | u8 type; |
134 | u8 data[]; |
135 | }; |
136 | |
137 | #define EP_NAME_INTEL_VSS 0x10 |
138 | |
139 | /* Intel Vendor specific structure */ |
140 | struct intel_vss { |
141 | u16 vendor; |
142 | u16 model; |
143 | u8 mc; |
144 | u8 flags; |
145 | u16 pci_devid; |
146 | u32 nvm_version; |
147 | }; |
148 | |
149 | #define INTEL_VSS_FLAGS_RTD3 BIT(0) |
150 | |
151 | static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size) |
152 | { |
153 | const void *end = ep_name + size; |
154 | |
155 | while (ep_name < end) { |
156 | const struct ep_name_entry *ep = ep_name; |
157 | |
158 | if (!ep->len) |
159 | break; |
160 | if (ep_name + ep->len > end) |
161 | break; |
162 | |
163 | if (ep->type == EP_NAME_INTEL_VSS) |
164 | return (const struct intel_vss *)ep->data; |
165 | |
166 | ep_name += ep->len; |
167 | } |
168 | |
169 | return NULL; |
170 | } |
171 | |
172 | static bool intel_vss_is_rtd3(const void *ep_name, size_t size) |
173 | { |
174 | const struct intel_vss *vss; |
175 | |
176 | vss = parse_intel_vss(ep_name, size); |
177 | if (vss) |
178 | return !!(vss->flags & INTEL_VSS_FLAGS_RTD3); |
179 | |
180 | return false; |
181 | } |
182 | |
183 | static inline struct tb *icm_to_tb(struct icm *icm) |
184 | { |
185 | return ((void *)icm - sizeof(struct tb)); |
186 | } |
187 | |
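/*
 * Route strings pack one 8-bit link (port) number per hop, the first
 * hop in the lowest byte. As an illustrative example, a device at
 * depth 2 reached through link 1 and then link 3 has route 0x0301;
 * phy_port_from_route() then picks the last hop byte (0x03) and maps
 * it to its physical port.
 */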
188 | static inline u8 phy_port_from_route(u64 route, u8 depth) |
189 | { |
190 | u8 link; |
191 | |
192 | link = depth ? route >> ((depth - 1) * 8) : route; |
193 | return tb_phy_port_from_link(link); |
194 | } |
195 | |
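/*
 * Link numbers come in dual-link pairs sharing the same physical port:
 * (1, 2) and (3, 4). This returns the other link of the pair, i.e.
 * 1 -> 2, 2 -> 1, 3 -> 4, 4 -> 3, and 0 (no link) -> 0.
 */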
196 | static inline u8 dual_link_from_link(u8 link) |
197 | { |
198 | return link ? ((link - 1) ^ 0x01) + 1 : 0; |
199 | } |
200 | |
201 | static inline u64 get_route(u32 route_hi, u32 route_lo) |
202 | { |
203 | return (u64)route_hi << 32 | route_lo; |
204 | } |
205 | |
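/*
 * Masks off the last hop of the route string to get the parent route.
 * For example route 0x0301 (depth 2) -> 0x0001, and a depth 1 route
 * -> 0 (the root switch).
 */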
206 | static inline u64 get_parent_route(u64 route) |
207 | { |
208 | int depth = tb_route_length(route); |
209 | return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0; |
210 | } |
211 | |
212 | static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec) |
213 | { |
	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
215 | u32 cmd; |
216 | |
217 | do { |
		pci_read_config_dword(icm->upstream_port,
				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
220 | if (!(cmd & PCIE2CIO_CMD_START)) { |
221 | if (cmd & PCIE2CIO_CMD_TIMEOUT) |
222 | break; |
223 | return 0; |
224 | } |
225 | |
		msleep(50);
227 | } while (time_before(jiffies, end)); |
228 | |
229 | return -ETIMEDOUT; |
230 | } |
231 | |
232 | static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, |
233 | unsigned int port, unsigned int index, u32 *data) |
234 | { |
235 | struct pci_dev *pdev = icm->upstream_port; |
236 | int ret, vnd_cap = icm->vnd_cap; |
237 | u32 cmd; |
238 | |
239 | cmd = index; |
240 | cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; |
241 | cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; |
242 | cmd |= PCIE2CIO_CMD_START; |
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	ret = pci2cio_wait_completion(icm, 5000);
	if (ret)
		return ret;

	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
250 | return 0; |
251 | } |
252 | |
253 | static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, |
254 | unsigned int port, unsigned int index, u32 data) |
255 | { |
256 | struct pci_dev *pdev = icm->upstream_port; |
257 | int vnd_cap = icm->vnd_cap; |
258 | u32 cmd; |
259 | |
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	return pci2cio_wait_completion(icm, 5000);
269 | } |
270 | |
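/*
 * An ICM response is matched to its pending request by frame EOF type
 * and command code. Responses may span several packets (for example the
 * topology reply uses ICM_GET_TOPOLOGY_PACKETS frames); icm_copy()
 * places each fragment at packet_id * response_size and signals
 * completion once the last fragment (total_packets - 1) has arrived.
 */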
271 | static bool icm_match(const struct tb_cfg_request *req, |
272 | const struct ctl_pkg *pkg) |
273 | { |
274 | const struct icm_pkg_header *res_hdr = pkg->buffer; |
275 | const struct icm_pkg_header *req_hdr = req->request; |
276 | |
277 | if (pkg->frame.eof != req->response_type) |
278 | return false; |
279 | if (res_hdr->code != req_hdr->code) |
280 | return false; |
281 | |
282 | return true; |
283 | } |
284 | |
285 | static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) |
286 | { |
287 | const struct icm_pkg_header *hdr = pkg->buffer; |
288 | |
289 | if (hdr->packet_id < req->npackets) { |
290 | size_t offset = hdr->packet_id * req->response_size; |
291 | |
292 | memcpy(req->response + offset, pkg->buffer, req->response_size); |
293 | } |
294 | |
295 | return hdr->packet_id == hdr->total_packets - 1; |
296 | } |
297 | |
298 | static int icm_request(struct tb *tb, const void *request, size_t request_size, |
299 | void *response, size_t response_size, size_t npackets, |
300 | int retries, unsigned int timeout_msec) |
301 | { |
302 | struct icm *icm = tb_priv(tb); |
303 | |
304 | do { |
305 | struct tb_cfg_request *req; |
306 | struct tb_cfg_result res; |
307 | |
308 | req = tb_cfg_request_alloc(); |
309 | if (!req) |
310 | return -ENOMEM; |
311 | |
312 | req->match = icm_match; |
313 | req->copy = icm_copy; |
314 | req->request = request; |
315 | req->request_size = request_size; |
316 | req->request_type = TB_CFG_PKG_ICM_CMD; |
317 | req->response = response; |
318 | req->npackets = npackets; |
319 | req->response_size = response_size; |
320 | req->response_type = TB_CFG_PKG_ICM_RESP; |
321 | |
322 | mutex_lock(&icm->request_lock); |
		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
		mutex_unlock(&icm->request_lock);
325 | |
326 | tb_cfg_request_put(req); |
327 | |
328 | if (res.err != -ETIMEDOUT) |
329 | return res.err == 1 ? -EIO : res.err; |
330 | |
		usleep_range(20, 50);
332 | } while (retries--); |
333 | |
334 | return -ETIMEDOUT; |
335 | } |
336 | |
337 | /* |
338 | * If rescan is queued to run (we are resuming), postpone it to give the |
339 | * firmware some more time to send device connected notifications for next |
340 | * devices in the chain. |
341 | */ |
342 | static void icm_postpone_rescan(struct tb *tb) |
343 | { |
344 | struct icm *icm = tb_priv(tb); |
345 | |
346 | if (delayed_work_pending(&icm->rescan_work)) |
		mod_delayed_work(tb->wq, &icm->rescan_work,
				 msecs_to_jiffies(500));
349 | } |
350 | |
351 | static void icm_veto_begin(struct tb *tb) |
352 | { |
353 | struct icm *icm = tb_priv(tb); |
354 | |
355 | if (!icm->veto) { |
356 | icm->veto = true; |
357 | /* Keep the domain powered while veto is in effect */ |
		pm_runtime_get(&tb->dev);
359 | } |
360 | } |
361 | |
362 | static void icm_veto_end(struct tb *tb) |
363 | { |
364 | struct icm *icm = tb_priv(tb); |
365 | |
366 | if (icm->veto) { |
367 | icm->veto = false; |
368 | /* Allow the domain suspend now */ |
		pm_runtime_mark_last_busy(&tb->dev);
		pm_runtime_put_autosuspend(&tb->dev);
371 | } |
372 | } |
373 | |
374 | static bool icm_firmware_running(const struct tb_nhi *nhi) |
375 | { |
376 | u32 val; |
377 | |
378 | val = ioread32(nhi->iobase + REG_FW_STS); |
379 | return !!(val & REG_FW_STS_ICM_EN); |
380 | } |
381 | |
382 | static bool icm_fr_is_supported(struct tb *tb) |
383 | { |
384 | return !x86_apple_machine; |
385 | } |
386 | |
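/*
 * Each port entry in the topology response carries the port type in its
 * low bits and, for ordinary ports, the index of the switch behind it
 * above ICM_PORT_INDEX_SHIFT. Index 0xff means nothing is connected so
 * it is mapped to 0 ("no switch") here.
 */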
387 | static inline int icm_fr_get_switch_index(u32 port) |
388 | { |
389 | int index; |
390 | |
391 | if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT) |
392 | return 0; |
393 | |
394 | index = port >> ICM_PORT_INDEX_SHIFT; |
395 | return index != 0xff ? index : 0; |
396 | } |
397 | |
398 | static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) |
399 | { |
400 | struct icm_fr_pkg_get_topology_response *switches, *sw; |
401 | struct icm_fr_pkg_get_topology request = { |
402 | .hdr = { .code = ICM_GET_TOPOLOGY }, |
403 | }; |
404 | size_t npackets = ICM_GET_TOPOLOGY_PACKETS; |
405 | int ret, index; |
406 | u8 i; |
407 | |
	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
409 | if (!switches) |
410 | return -ENOMEM; |
411 | |
	ret = icm_request(tb, &request, sizeof(request), switches,
			  sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT);
414 | if (ret) |
415 | goto err_free; |
416 | |
417 | sw = &switches[0]; |
	index = icm_fr_get_switch_index(sw->ports[link]);
419 | if (!index) { |
420 | ret = -ENODEV; |
421 | goto err_free; |
422 | } |
423 | |
424 | sw = &switches[index]; |
425 | for (i = 1; i < depth; i++) { |
426 | unsigned int j; |
427 | |
428 | if (!(sw->first_data & ICM_SWITCH_USED)) { |
429 | ret = -ENODEV; |
430 | goto err_free; |
431 | } |
432 | |
433 | for (j = 0; j < ARRAY_SIZE(sw->ports); j++) { |
			index = icm_fr_get_switch_index(sw->ports[j]);
435 | if (index > sw->switch_index) { |
436 | sw = &switches[index]; |
437 | break; |
438 | } |
439 | } |
440 | } |
441 | |
	*route = get_route(sw->route_hi, sw->route_lo);
443 | |
444 | err_free: |
	kfree(switches);
446 | return ret; |
447 | } |
448 | |
449 | static void icm_fr_save_devices(struct tb *tb) |
450 | { |
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
452 | } |
453 | |
454 | static int |
455 | icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level, |
456 | u8 *proto_version, size_t *nboot_acl, bool *rpm) |
457 | { |
458 | struct icm_fr_pkg_driver_ready_response reply; |
459 | struct icm_pkg_driver_ready request = { |
460 | .hdr.code = ICM_DRIVER_READY, |
461 | }; |
462 | int ret; |
463 | |
464 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
467 | if (ret) |
468 | return ret; |
469 | |
470 | if (security_level) |
471 | *security_level = reply.security_level & ICM_FR_SLEVEL_MASK; |
472 | |
473 | return 0; |
474 | } |
475 | |
476 | static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw) |
477 | { |
478 | struct icm_fr_pkg_approve_device request; |
479 | struct icm_fr_pkg_approve_device reply; |
480 | int ret; |
481 | |
482 | memset(&request, 0, sizeof(request)); |
483 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); |
484 | request.hdr.code = ICM_APPROVE_DEVICE; |
485 | request.connection_id = sw->connection_id; |
486 | request.connection_key = sw->connection_key; |
487 | |
488 | memset(&reply, 0, sizeof(reply)); |
489 | /* Use larger timeout as establishing tunnels can take some time */ |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
492 | if (ret) |
493 | return ret; |
494 | |
495 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { |
496 | tb_warn(tb, "PCIe tunnel creation failed\n" ); |
497 | return -EIO; |
498 | } |
499 | |
500 | return 0; |
501 | } |
502 | |
503 | static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw) |
504 | { |
505 | struct icm_fr_pkg_add_device_key request; |
506 | struct icm_fr_pkg_add_device_key_response reply; |
507 | int ret; |
508 | |
509 | memset(&request, 0, sizeof(request)); |
510 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); |
511 | request.hdr.code = ICM_ADD_DEVICE_KEY; |
512 | request.connection_id = sw->connection_id; |
513 | request.connection_key = sw->connection_key; |
514 | memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); |
515 | |
516 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
519 | if (ret) |
520 | return ret; |
521 | |
522 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { |
523 | tb_warn(tb, "Adding key to switch failed\n" ); |
524 | return -EIO; |
525 | } |
526 | |
527 | return 0; |
528 | } |
529 | |
530 | static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, |
531 | const u8 *challenge, u8 *response) |
532 | { |
533 | struct icm_fr_pkg_challenge_device request; |
534 | struct icm_fr_pkg_challenge_device_response reply; |
535 | int ret; |
536 | |
537 | memset(&request, 0, sizeof(request)); |
538 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); |
539 | request.hdr.code = ICM_CHALLENGE_DEVICE; |
540 | request.connection_id = sw->connection_id; |
541 | request.connection_key = sw->connection_key; |
542 | memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); |
543 | |
544 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
547 | if (ret) |
548 | return ret; |
549 | |
550 | if (reply.hdr.flags & ICM_FLAGS_ERROR) |
551 | return -EKEYREJECTED; |
552 | if (reply.hdr.flags & ICM_FLAGS_NO_KEY) |
553 | return -ENOKEY; |
554 | |
555 | memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); |
556 | |
557 | return 0; |
558 | } |
559 | |
560 | static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, |
561 | int transmit_path, int transmit_ring, |
562 | int receive_path, int receive_ring) |
563 | { |
564 | struct icm_fr_pkg_approve_xdomain_response reply; |
565 | struct icm_fr_pkg_approve_xdomain request; |
566 | int ret; |
567 | |
568 | memset(&request, 0, sizeof(request)); |
569 | request.hdr.code = ICM_APPROVE_XDOMAIN; |
570 | request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link; |
571 | memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); |
572 | |
573 | request.transmit_path = transmit_path; |
574 | request.transmit_ring = transmit_ring; |
575 | request.receive_path = receive_path; |
576 | request.receive_ring = receive_ring; |
577 | |
578 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
581 | if (ret) |
582 | return ret; |
583 | |
584 | if (reply.hdr.flags & ICM_FLAGS_ERROR) |
585 | return -EIO; |
586 | |
587 | return 0; |
588 | } |
589 | |
590 | static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, |
591 | int transmit_path, int transmit_ring, |
592 | int receive_path, int receive_ring) |
593 | { |
594 | u8 phy_port; |
595 | u8 cmd; |
596 | |
	phy_port = tb_phy_port_from_link(xd->link);
598 | if (phy_port == 0) |
599 | cmd = NHI_MAILBOX_DISCONNECT_PA; |
600 | else |
601 | cmd = NHI_MAILBOX_DISCONNECT_PB; |
602 | |
	nhi_mailbox_cmd(tb->nhi, cmd, 1);
	usleep_range(10, 50);
	nhi_mailbox_cmd(tb->nhi, cmd, 2);
606 | return 0; |
607 | } |
608 | |
609 | static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route, |
610 | const uuid_t *uuid) |
611 | { |
612 | struct tb *tb = parent_sw->tb; |
613 | struct tb_switch *sw; |
614 | |
	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
	if (IS_ERR(sw)) {
		tb_warn(tb, "failed to allocate switch at %llx\n", route);
618 | return sw; |
619 | } |
620 | |
	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
622 | if (!sw->uuid) { |
623 | tb_switch_put(sw); |
		return ERR_PTR(-ENOMEM);
625 | } |
626 | |
	init_completion(&sw->rpm_complete);
628 | return sw; |
629 | } |
630 | |
631 | static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw) |
632 | { |
633 | u64 route = tb_route(sw); |
634 | int ret; |
635 | |
636 | /* Link the two switches now */ |
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
639 | |
640 | ret = tb_switch_add(sw); |
641 | if (ret) |
		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
643 | |
644 | return ret; |
645 | } |
646 | |
647 | static void update_switch(struct tb_switch *sw, u64 route, u8 connection_id, |
648 | u8 connection_key, u8 link, u8 depth, bool boot) |
649 | { |
650 | struct tb_switch *parent_sw = tb_switch_parent(sw); |
651 | |
652 | /* Disconnect from parent */ |
653 | tb_switch_downstream_port(sw)->remote = NULL; |
654 | /* Re-connect via updated port */ |
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
656 | |
657 | /* Update with the new addressing information */ |
658 | sw->config.route_hi = upper_32_bits(route); |
659 | sw->config.route_lo = lower_32_bits(route); |
660 | sw->connection_id = connection_id; |
661 | sw->connection_key = connection_key; |
662 | sw->link = link; |
663 | sw->depth = depth; |
664 | sw->boot = boot; |
665 | |
666 | /* This switch still exists */ |
667 | sw->is_unplugged = false; |
668 | |
669 | /* Runtime resume is now complete */ |
670 | complete(&sw->rpm_complete); |
671 | } |
672 | |
673 | static void remove_switch(struct tb_switch *sw) |
674 | { |
675 | tb_switch_downstream_port(sw)->remote = NULL; |
676 | tb_switch_remove(sw); |
677 | } |
678 | |
679 | static void add_xdomain(struct tb_switch *sw, u64 route, |
680 | const uuid_t *local_uuid, const uuid_t *remote_uuid, |
681 | u8 link, u8 depth) |
682 | { |
683 | struct tb_xdomain *xd; |
684 | |
	pm_runtime_get_sync(&sw->dev);

	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
688 | if (!xd) |
689 | goto out; |
690 | |
691 | xd->link = link; |
692 | xd->depth = depth; |
693 | |
694 | tb_port_at(route, sw)->xdomain = xd; |
695 | |
696 | tb_xdomain_add(xd); |
697 | |
698 | out: |
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
701 | } |
702 | |
703 | static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link) |
704 | { |
705 | xd->link = link; |
706 | xd->route = route; |
707 | xd->is_unplugged = false; |
708 | } |
709 | |
710 | static void remove_xdomain(struct tb_xdomain *xd) |
711 | { |
712 | struct tb_switch *sw; |
713 | |
	sw = tb_to_switch(xd->dev.parent);
	tb_port_at(xd->route, sw)->xdomain = NULL;
716 | tb_xdomain_remove(xd); |
717 | } |
718 | |
719 | static void |
720 | icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) |
721 | { |
722 | const struct icm_fr_event_device_connected *pkg = |
723 | (const struct icm_fr_event_device_connected *)hdr; |
724 | enum tb_security_level security_level; |
725 | struct tb_switch *sw, *parent_sw; |
726 | bool boot, dual_lane, speed_gen3; |
727 | struct icm *icm = tb_priv(tb); |
728 | bool authorized = false; |
729 | struct tb_xdomain *xd; |
730 | u8 link, depth; |
731 | u64 route; |
732 | int ret; |
733 | |
734 | icm_postpone_rescan(tb); |
735 | |
736 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; |
737 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> |
738 | ICM_LINK_INFO_DEPTH_SHIFT; |
739 | authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; |
740 | security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> |
741 | ICM_FLAGS_SLEVEL_SHIFT; |
742 | boot = pkg->link_info & ICM_LINK_INFO_BOOT; |
743 | dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; |
744 | speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; |
745 | |
746 | if (pkg->link_info & ICM_LINK_INFO_REJECTED) { |
747 | tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n" , |
748 | link, depth); |
749 | return; |
750 | } |
751 | |
	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
753 | if (sw) { |
754 | u8 phy_port, sw_phy_port; |
755 | |
		sw_phy_port = tb_phy_port_from_link(sw->link);
757 | phy_port = tb_phy_port_from_link(link); |
758 | |
759 | /* |
760 | * On resume ICM will send us connected events for the |
761 | * devices that still are present. However, that |
762 | * information might have changed for example by the |
763 | * fact that a switch on a dual-link connection might |
764 | * have been enumerated using the other link now. Make |
765 | * sure our book keeping matches that. |
766 | */ |
767 | if (sw->depth == depth && sw_phy_port == phy_port && |
768 | !!sw->authorized == authorized) { |
769 | /* |
770 | * It was enumerated through another link so update |
771 | * route string accordingly. |
772 | */ |
773 | if (sw->link != link) { |
774 | ret = icm->get_route(tb, link, depth, &route); |
775 | if (ret) { |
776 | tb_err(tb, "failed to update route string for switch at %u.%u\n" , |
777 | link, depth); |
778 | tb_switch_put(sw); |
779 | return; |
780 | } |
781 | } else { |
782 | route = tb_route(sw); |
783 | } |
784 | |
			update_switch(sw, route, pkg->connection_id,
				      pkg->connection_key, link, depth, boot);
787 | tb_switch_put(sw); |
788 | return; |
789 | } |
790 | |
791 | /* |
792 | * User connected the same switch to another physical |
793 | * port or to another part of the topology. Remove the |
794 | * existing switch now before adding the new one. |
795 | */ |
796 | remove_switch(sw); |
797 | tb_switch_put(sw); |
798 | } |
799 | |
800 | /* |
801 | * If the switch was not found by UUID, look for a switch on |
802 | * same physical port (taking possible link aggregation into |
803 | * account) and depth. If we found one it is definitely a stale |
804 | * one so remove it first. |
805 | */ |
806 | sw = tb_switch_find_by_link_depth(tb, link, depth); |
807 | if (!sw) { |
808 | u8 dual_link; |
809 | |
810 | dual_link = dual_link_from_link(link); |
811 | if (dual_link) |
			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
813 | } |
814 | if (sw) { |
815 | remove_switch(sw); |
816 | tb_switch_put(sw); |
817 | } |
818 | |
819 | /* Remove existing XDomain connection if found */ |
820 | xd = tb_xdomain_find_by_link_depth(tb, link, depth); |
821 | if (xd) { |
822 | remove_xdomain(xd); |
823 | tb_xdomain_put(xd); |
824 | } |
825 | |
	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
827 | if (!parent_sw) { |
828 | tb_err(tb, "failed to find parent switch for %u.%u\n" , |
829 | link, depth); |
830 | return; |
831 | } |
832 | |
833 | ret = icm->get_route(tb, link, depth, &route); |
834 | if (ret) { |
835 | tb_err(tb, "failed to find route string for switch at %u.%u\n" , |
836 | link, depth); |
837 | tb_switch_put(sw: parent_sw); |
838 | return; |
839 | } |
840 | |
	pm_runtime_get_sync(&parent_sw->dev);

	sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
	if (!IS_ERR(sw)) {
845 | sw->connection_id = pkg->connection_id; |
846 | sw->connection_key = pkg->connection_key; |
847 | sw->link = link; |
848 | sw->depth = depth; |
849 | sw->authorized = authorized; |
850 | sw->security_level = security_level; |
851 | sw->boot = boot; |
852 | sw->link_speed = speed_gen3 ? 20 : 10; |
853 | sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL : |
854 | TB_LINK_WIDTH_SINGLE; |
		sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));
856 | |
857 | if (add_switch(parent_sw, sw)) |
858 | tb_switch_put(sw); |
859 | } |
860 | |
	pm_runtime_mark_last_busy(&parent_sw->dev);
	pm_runtime_put_autosuspend(&parent_sw->dev);

	tb_switch_put(parent_sw);
865 | } |
866 | |
867 | static void |
868 | icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) |
869 | { |
870 | const struct icm_fr_event_device_disconnected *pkg = |
871 | (const struct icm_fr_event_device_disconnected *)hdr; |
872 | struct tb_switch *sw; |
873 | u8 link, depth; |
874 | |
875 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; |
876 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> |
877 | ICM_LINK_INFO_DEPTH_SHIFT; |
878 | |
879 | if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { |
880 | tb_warn(tb, "invalid topology %u.%u, ignoring\n" , link, depth); |
881 | return; |
882 | } |
883 | |
884 | sw = tb_switch_find_by_link_depth(tb, link, depth); |
885 | if (!sw) { |
886 | tb_warn(tb, "no switch exists at %u.%u, ignoring\n" , link, |
887 | depth); |
888 | return; |
889 | } |
890 | |
	pm_runtime_get_sync(sw->dev.parent);
892 | |
893 | remove_switch(sw); |
894 | |
	pm_runtime_mark_last_busy(sw->dev.parent);
	pm_runtime_put_autosuspend(sw->dev.parent);
897 | |
898 | tb_switch_put(sw); |
899 | } |
900 | |
901 | static void |
902 | icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) |
903 | { |
904 | const struct icm_fr_event_xdomain_connected *pkg = |
905 | (const struct icm_fr_event_xdomain_connected *)hdr; |
906 | struct tb_xdomain *xd; |
907 | struct tb_switch *sw; |
908 | u8 link, depth; |
909 | u64 route; |
910 | |
911 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; |
912 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> |
913 | ICM_LINK_INFO_DEPTH_SHIFT; |
914 | |
915 | if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { |
916 | tb_warn(tb, "invalid topology %u.%u, ignoring\n" , link, depth); |
917 | return; |
918 | } |
919 | |
	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
923 | if (xd) { |
924 | u8 xd_phy_port, phy_port; |
925 | |
		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
927 | phy_port = phy_port_from_route(route, depth); |
928 | |
929 | if (xd->depth == depth && xd_phy_port == phy_port) { |
930 | update_xdomain(xd, route, link); |
931 | tb_xdomain_put(xd); |
932 | return; |
933 | } |
934 | |
935 | /* |
936 | * If we find an existing XDomain connection remove it |
937 | * now. We need to go through login handshake and |
938 | * everything anyway to be able to re-establish the |
939 | * connection. |
940 | */ |
941 | remove_xdomain(xd); |
942 | tb_xdomain_put(xd); |
943 | } |
944 | |
945 | /* |
	 * Look if there already exists an XDomain in the same place
	 * as the new one and in that case remove it because it is
948 | * most likely another host that got disconnected. |
949 | */ |
950 | xd = tb_xdomain_find_by_link_depth(tb, link, depth); |
951 | if (!xd) { |
952 | u8 dual_link; |
953 | |
954 | dual_link = dual_link_from_link(link); |
955 | if (dual_link) |
			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
							   depth);
958 | } |
959 | if (xd) { |
960 | remove_xdomain(xd); |
961 | tb_xdomain_put(xd); |
962 | } |
963 | |
964 | /* |
965 | * If the user disconnected a switch during suspend and |
966 | * connected another host to the same port, remove the switch |
967 | * first. |
968 | */ |
969 | sw = tb_switch_find_by_route(tb, route); |
970 | if (sw) { |
971 | remove_switch(sw); |
972 | tb_switch_put(sw); |
973 | } |
974 | |
975 | sw = tb_switch_find_by_link_depth(tb, link, depth); |
976 | if (!sw) { |
977 | tb_warn(tb, "no switch exists at %u.%u, ignoring\n" , link, |
978 | depth); |
979 | return; |
980 | } |
981 | |
	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
		    depth);
984 | tb_switch_put(sw); |
985 | } |
986 | |
987 | static void |
988 | icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) |
989 | { |
990 | const struct icm_fr_event_xdomain_disconnected *pkg = |
991 | (const struct icm_fr_event_xdomain_disconnected *)hdr; |
992 | struct tb_xdomain *xd; |
993 | |
994 | /* |
995 | * If the connection is through one or multiple devices, the |
996 | * XDomain device is removed along with them so it is fine if we |
997 | * cannot find it here. |
998 | */ |
	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
1000 | if (xd) { |
1001 | remove_xdomain(xd); |
1002 | tb_xdomain_put(xd); |
1003 | } |
1004 | } |
1005 | |
1006 | static int icm_tr_cio_reset(struct tb *tb) |
1007 | { |
	return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1));
1009 | } |
1010 | |
1011 | static int |
1012 | icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, |
1013 | u8 *proto_version, size_t *nboot_acl, bool *rpm) |
1014 | { |
1015 | struct icm_tr_pkg_driver_ready_response reply; |
1016 | struct icm_pkg_driver_ready request = { |
1017 | .hdr.code = ICM_DRIVER_READY, |
1018 | }; |
1019 | int ret; |
1020 | |
1021 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, 10, 2000);
1024 | if (ret) |
1025 | return ret; |
1026 | |
1027 | if (security_level) |
1028 | *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK; |
1029 | if (proto_version) |
1030 | *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >> |
1031 | ICM_TR_INFO_PROTO_VERSION_SHIFT; |
1032 | if (nboot_acl) |
1033 | *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >> |
1034 | ICM_TR_INFO_BOOT_ACL_SHIFT; |
1035 | if (rpm) |
1036 | *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3); |
1037 | |
1038 | return 0; |
1039 | } |
1040 | |
1041 | static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw) |
1042 | { |
1043 | struct icm_tr_pkg_approve_device request; |
1044 | struct icm_tr_pkg_approve_device reply; |
1045 | int ret; |
1046 | |
1047 | memset(&request, 0, sizeof(request)); |
1048 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); |
1049 | request.hdr.code = ICM_APPROVE_DEVICE; |
1050 | request.route_lo = sw->config.route_lo; |
1051 | request.route_hi = sw->config.route_hi; |
1052 | request.connection_id = sw->connection_id; |
1053 | |
1054 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
1057 | if (ret) |
1058 | return ret; |
1059 | |
1060 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { |
1061 | tb_warn(tb, "PCIe tunnel creation failed\n" ); |
1062 | return -EIO; |
1063 | } |
1064 | |
1065 | return 0; |
1066 | } |
1067 | |
1068 | static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw) |
1069 | { |
1070 | struct icm_tr_pkg_add_device_key_response reply; |
1071 | struct icm_tr_pkg_add_device_key request; |
1072 | int ret; |
1073 | |
1074 | memset(&request, 0, sizeof(request)); |
1075 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); |
1076 | request.hdr.code = ICM_ADD_DEVICE_KEY; |
1077 | request.route_lo = sw->config.route_lo; |
1078 | request.route_hi = sw->config.route_hi; |
1079 | request.connection_id = sw->connection_id; |
1080 | memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); |
1081 | |
1082 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
1085 | if (ret) |
1086 | return ret; |
1087 | |
1088 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { |
1089 | tb_warn(tb, "Adding key to switch failed\n" ); |
1090 | return -EIO; |
1091 | } |
1092 | |
1093 | return 0; |
1094 | } |
1095 | |
1096 | static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, |
1097 | const u8 *challenge, u8 *response) |
1098 | { |
1099 | struct icm_tr_pkg_challenge_device_response reply; |
1100 | struct icm_tr_pkg_challenge_device request; |
1101 | int ret; |
1102 | |
1103 | memset(&request, 0, sizeof(request)); |
1104 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); |
1105 | request.hdr.code = ICM_CHALLENGE_DEVICE; |
1106 | request.route_lo = sw->config.route_lo; |
1107 | request.route_hi = sw->config.route_hi; |
1108 | request.connection_id = sw->connection_id; |
1109 | memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); |
1110 | |
1111 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
1114 | if (ret) |
1115 | return ret; |
1116 | |
1117 | if (reply.hdr.flags & ICM_FLAGS_ERROR) |
1118 | return -EKEYREJECTED; |
1119 | if (reply.hdr.flags & ICM_FLAGS_NO_KEY) |
1120 | return -ENOKEY; |
1121 | |
1122 | memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); |
1123 | |
1124 | return 0; |
1125 | } |
1126 | |
1127 | static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, |
1128 | int transmit_path, int transmit_ring, |
1129 | int receive_path, int receive_ring) |
1130 | { |
1131 | struct icm_tr_pkg_approve_xdomain_response reply; |
1132 | struct icm_tr_pkg_approve_xdomain request; |
1133 | int ret; |
1134 | |
1135 | memset(&request, 0, sizeof(request)); |
1136 | request.hdr.code = ICM_APPROVE_XDOMAIN; |
1137 | request.route_hi = upper_32_bits(xd->route); |
1138 | request.route_lo = lower_32_bits(xd->route); |
1139 | request.transmit_path = transmit_path; |
1140 | request.transmit_ring = transmit_ring; |
1141 | request.receive_path = receive_path; |
1142 | request.receive_ring = receive_ring; |
1143 | memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); |
1144 | |
1145 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
1148 | if (ret) |
1149 | return ret; |
1150 | |
1151 | if (reply.hdr.flags & ICM_FLAGS_ERROR) |
1152 | return -EIO; |
1153 | |
1154 | return 0; |
1155 | } |
1156 | |
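/*
 * XDomain DMA paths are torn down with a two-stage ICM_DISCONNECT_XDOMAIN
 * handshake (the exact semantics of each stage are firmware defined):
 * icm_tr_disconnect_xdomain_paths() below issues stage 1, waits briefly,
 * then issues stage 2.
 */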
1157 | static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd, |
1158 | int stage) |
1159 | { |
1160 | struct icm_tr_pkg_disconnect_xdomain_response reply; |
1161 | struct icm_tr_pkg_disconnect_xdomain request; |
1162 | int ret; |
1163 | |
1164 | memset(&request, 0, sizeof(request)); |
1165 | request.hdr.code = ICM_DISCONNECT_XDOMAIN; |
1166 | request.stage = stage; |
1167 | request.route_hi = upper_32_bits(xd->route); |
1168 | request.route_lo = lower_32_bits(xd->route); |
1169 | memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); |
1170 | |
1171 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
1174 | if (ret) |
1175 | return ret; |
1176 | |
1177 | if (reply.hdr.flags & ICM_FLAGS_ERROR) |
1178 | return -EIO; |
1179 | |
1180 | return 0; |
1181 | } |
1182 | |
1183 | static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, |
1184 | int transmit_path, int transmit_ring, |
1185 | int receive_path, int receive_ring) |
1186 | { |
1187 | int ret; |
1188 | |
	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
1190 | if (ret) |
1191 | return ret; |
1192 | |
	usleep_range(10, 50);
	return icm_tr_xdomain_tear_down(tb, xd, 2);
1195 | } |
1196 | |
1197 | static void |
1198 | __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr, |
1199 | bool force_rtd3) |
1200 | { |
1201 | const struct icm_tr_event_device_connected *pkg = |
1202 | (const struct icm_tr_event_device_connected *)hdr; |
1203 | bool authorized, boot, dual_lane, speed_gen3; |
1204 | enum tb_security_level security_level; |
1205 | struct tb_switch *sw, *parent_sw; |
1206 | struct tb_xdomain *xd; |
1207 | u64 route; |
1208 | |
1209 | icm_postpone_rescan(tb); |
1210 | |
1211 | /* |
1212 | * Currently we don't use the QoS information coming with the |
1213 | * device connected message so simply just ignore that extra |
1214 | * packet for now. |
1215 | */ |
1216 | if (pkg->hdr.packet_id) |
1217 | return; |
1218 | |
	route = get_route(pkg->route_hi, pkg->route_lo);
1220 | authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; |
1221 | security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> |
1222 | ICM_FLAGS_SLEVEL_SHIFT; |
1223 | boot = pkg->link_info & ICM_LINK_INFO_BOOT; |
1224 | dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; |
1225 | speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; |
1226 | |
1227 | if (pkg->link_info & ICM_LINK_INFO_REJECTED) { |
1228 | tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n" , |
1229 | route); |
1230 | return; |
1231 | } |
1232 | |
	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
1234 | if (sw) { |
1235 | /* Update the switch if it is still in the same place */ |
1236 | if (tb_route(sw) == route && !!sw->authorized == authorized) { |
			update_switch(sw, route, pkg->connection_id, 0, 0, 0,
				      boot);
1239 | tb_switch_put(sw); |
1240 | return; |
1241 | } |
1242 | |
1243 | remove_switch(sw); |
1244 | tb_switch_put(sw); |
1245 | } |
1246 | |
1247 | /* Another switch with the same address */ |
1248 | sw = tb_switch_find_by_route(tb, route); |
1249 | if (sw) { |
1250 | remove_switch(sw); |
1251 | tb_switch_put(sw); |
1252 | } |
1253 | |
1254 | /* XDomain connection with the same address */ |
1255 | xd = tb_xdomain_find_by_route(tb, route); |
1256 | if (xd) { |
1257 | remove_xdomain(xd); |
1258 | tb_xdomain_put(xd); |
1259 | } |
1260 | |
	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %llx\n", route);
1264 | return; |
1265 | } |
1266 | |
	pm_runtime_get_sync(&parent_sw->dev);

	sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
	if (!IS_ERR(sw)) {
1271 | sw->connection_id = pkg->connection_id; |
1272 | sw->authorized = authorized; |
1273 | sw->security_level = security_level; |
1274 | sw->boot = boot; |
1275 | sw->link_speed = speed_gen3 ? 20 : 10; |
1276 | sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL : |
1277 | TB_LINK_WIDTH_SINGLE; |
1278 | sw->rpm = force_rtd3; |
1279 | if (!sw->rpm) |
			sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
						    sizeof(pkg->ep_name));
1282 | |
1283 | if (add_switch(parent_sw, sw)) |
1284 | tb_switch_put(sw); |
1285 | } |
1286 | |
	pm_runtime_mark_last_busy(&parent_sw->dev);
	pm_runtime_put_autosuspend(&parent_sw->dev);

	tb_switch_put(parent_sw);
1291 | } |
1292 | |
1293 | static void |
1294 | icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) |
1295 | { |
	__icm_tr_device_connected(tb, hdr, false);
1297 | } |
1298 | |
1299 | static void |
1300 | icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) |
1301 | { |
1302 | const struct icm_tr_event_device_disconnected *pkg = |
1303 | (const struct icm_tr_event_device_disconnected *)hdr; |
1304 | struct tb_switch *sw; |
1305 | u64 route; |
1306 | |
	route = get_route(pkg->route_hi, pkg->route_lo);
1308 | |
1309 | sw = tb_switch_find_by_route(tb, route); |
1310 | if (!sw) { |
1311 | tb_warn(tb, "no switch exists at %llx, ignoring\n" , route); |
1312 | return; |
1313 | } |
	pm_runtime_get_sync(sw->dev.parent);
1315 | |
1316 | remove_switch(sw); |
1317 | |
	pm_runtime_mark_last_busy(sw->dev.parent);
	pm_runtime_put_autosuspend(sw->dev.parent);
1320 | |
1321 | tb_switch_put(sw); |
1322 | } |
1323 | |
1324 | static void |
1325 | icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) |
1326 | { |
1327 | const struct icm_tr_event_xdomain_connected *pkg = |
1328 | (const struct icm_tr_event_xdomain_connected *)hdr; |
1329 | struct tb_xdomain *xd; |
1330 | struct tb_switch *sw; |
1331 | u64 route; |
1332 | |
1333 | if (!tb->root_switch) |
1334 | return; |
1335 | |
	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
1339 | if (xd) { |
1340 | if (xd->route == route) { |
			update_xdomain(xd, route, 0);
1342 | tb_xdomain_put(xd); |
1343 | return; |
1344 | } |
1345 | |
1346 | remove_xdomain(xd); |
1347 | tb_xdomain_put(xd); |
1348 | } |
1349 | |
1350 | /* An existing xdomain with the same address */ |
1351 | xd = tb_xdomain_find_by_route(tb, route); |
1352 | if (xd) { |
1353 | remove_xdomain(xd); |
1354 | tb_xdomain_put(xd); |
1355 | } |
1356 | |
1357 | /* |
1358 | * If the user disconnected a switch during suspend and |
1359 | * connected another host to the same port, remove the switch |
1360 | * first. |
1361 | */ |
1362 | sw = tb_switch_find_by_route(tb, route); |
1363 | if (sw) { |
1364 | remove_switch(sw); |
1365 | tb_switch_put(sw); |
1366 | } |
1367 | |
	sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
1371 | return; |
1372 | } |
1373 | |
	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
1375 | tb_switch_put(sw); |
1376 | } |
1377 | |
1378 | static void |
1379 | icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) |
1380 | { |
1381 | const struct icm_tr_event_xdomain_disconnected *pkg = |
1382 | (const struct icm_tr_event_xdomain_disconnected *)hdr; |
1383 | struct tb_xdomain *xd; |
1384 | u64 route; |
1385 | |
	route = get_route(pkg->route_hi, pkg->route_lo);
1387 | |
1388 | xd = tb_xdomain_find_by_route(tb, route); |
1389 | if (xd) { |
1390 | remove_xdomain(xd); |
1391 | tb_xdomain_put(xd); |
1392 | } |
1393 | } |
1394 | |
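/*
 * Walks up from the NHI to the first PCIe upstream port and returns it
 * only if it is one of the known Alpine Ridge / Titan Ridge bridges,
 * since those expose the vendor specific capability used to reset and
 * start the ICM through PCIe2CIO.
 */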
1395 | static struct pci_dev *get_upstream_port(struct pci_dev *pdev) |
1396 | { |
1397 | struct pci_dev *parent; |
1398 | |
	parent = pci_upstream_bridge(pdev);
	while (parent) {
		if (!pci_is_pcie(parent))
			return NULL;
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
			break;
		parent = pci_upstream_bridge(parent);
1406 | } |
1407 | |
1408 | if (!parent) |
1409 | return NULL; |
1410 | |
1411 | switch (parent->device) { |
1412 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: |
1413 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: |
1414 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: |
1415 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: |
1416 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: |
1417 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: |
1418 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: |
1419 | return parent; |
1420 | } |
1421 | |
1422 | return NULL; |
1423 | } |
1424 | |
1425 | static bool icm_ar_is_supported(struct tb *tb) |
1426 | { |
1427 | struct pci_dev *upstream_port; |
1428 | struct icm *icm = tb_priv(tb); |
1429 | |
1430 | /* |
1431 | * Starting from Alpine Ridge we can use ICM on Apple machines |
1432 | * as well. We just need to reset and re-enable it first. |
1433 | * However, only start it if explicitly asked by the user. |
1434 | */ |
	if (icm_firmware_running(tb->nhi))
1436 | return true; |
1437 | if (!start_icm) |
1438 | return false; |
1439 | |
1440 | /* |
1441 | * Find the upstream PCIe port in case we need to do reset |
1442 | * through its vendor specific registers. |
1443 | */ |
	upstream_port = get_upstream_port(tb->nhi->pdev);
1445 | if (upstream_port) { |
1446 | int cap; |
1447 | |
		cap = pci_find_ext_capability(upstream_port,
					      PCI_EXT_CAP_ID_VNDR);
1450 | if (cap > 0) { |
1451 | icm->upstream_port = upstream_port; |
1452 | icm->vnd_cap = cap; |
1453 | |
1454 | return true; |
1455 | } |
1456 | } |
1457 | |
1458 | return false; |
1459 | } |
1460 | |
1461 | static int icm_ar_cio_reset(struct tb *tb) |
1462 | { |
	return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9));
1464 | } |
1465 | |
1466 | static int icm_ar_get_mode(struct tb *tb) |
1467 | { |
1468 | struct tb_nhi *nhi = tb->nhi; |
1469 | int retries = 60; |
1470 | u32 val; |
1471 | |
1472 | do { |
1473 | val = ioread32(nhi->iobase + REG_FW_STS); |
1474 | if (val & REG_FW_STS_NVM_AUTH_DONE) |
1475 | break; |
		msleep(50);
1477 | } while (--retries); |
1478 | |
1479 | if (!retries) { |
		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
1481 | return -ENODEV; |
1482 | } |
1483 | |
1484 | return nhi_mailbox_mode(nhi); |
1485 | } |
1486 | |
1487 | static int |
1488 | icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level, |
1489 | u8 *proto_version, size_t *nboot_acl, bool *rpm) |
1490 | { |
1491 | struct icm_ar_pkg_driver_ready_response reply; |
1492 | struct icm_pkg_driver_ready request = { |
1493 | .hdr.code = ICM_DRIVER_READY, |
1494 | }; |
1495 | int ret; |
1496 | |
1497 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
1500 | if (ret) |
1501 | return ret; |
1502 | |
1503 | if (security_level) |
1504 | *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK; |
1505 | if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED)) |
1506 | *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >> |
1507 | ICM_AR_INFO_BOOT_ACL_SHIFT; |
1508 | if (rpm) |
1509 | *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3); |
1510 | |
1511 | return 0; |
1512 | } |
1513 | |
1514 | static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) |
1515 | { |
1516 | struct icm_ar_pkg_get_route_response reply; |
1517 | struct icm_ar_pkg_get_route request = { |
1518 | .hdr = { .code = ICM_GET_ROUTE }, |
1519 | .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link, |
1520 | }; |
1521 | int ret; |
1522 | |
1523 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
1526 | if (ret) |
1527 | return ret; |
1528 | |
1529 | if (reply.hdr.flags & ICM_FLAGS_ERROR) |
1530 | return -EIO; |
1531 | |
	*route = get_route(reply.route_hi, reply.route_lo);
1533 | return 0; |
1534 | } |
1535 | |
1536 | static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids) |
1537 | { |
1538 | struct icm_ar_pkg_preboot_acl_response reply; |
1539 | struct icm_ar_pkg_preboot_acl request = { |
1540 | .hdr = { .code = ICM_PREBOOT_ACL }, |
1541 | }; |
1542 | int ret, i; |
1543 | |
1544 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
1547 | if (ret) |
1548 | return ret; |
1549 | |
1550 | if (reply.hdr.flags & ICM_FLAGS_ERROR) |
1551 | return -EIO; |
1552 | |
1553 | for (i = 0; i < nuuids; i++) { |
1554 | u32 *uuid = (u32 *)&uuids[i]; |
1555 | |
1556 | uuid[0] = reply.acl[i].uuid_lo; |
1557 | uuid[1] = reply.acl[i].uuid_hi; |
1558 | |
1559 | if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) { |
1560 | /* Map empty entries to null UUID */ |
1561 | uuid[0] = 0; |
1562 | uuid[1] = 0; |
1563 | } else if (uuid[0] != 0 || uuid[1] != 0) { |
			/* Upper two DWs are always ones */
1565 | uuid[2] = 0xffffffff; |
1566 | uuid[3] = 0xffffffff; |
1567 | } |
1568 | } |
1569 | |
1570 | return ret; |
1571 | } |
1572 | |
1573 | static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids, |
1574 | size_t nuuids) |
1575 | { |
1576 | struct icm_ar_pkg_preboot_acl_response reply; |
1577 | struct icm_ar_pkg_preboot_acl request = { |
1578 | .hdr = { |
1579 | .code = ICM_PREBOOT_ACL, |
1580 | .flags = ICM_FLAGS_WRITE, |
1581 | }, |
1582 | }; |
1583 | int ret, i; |
1584 | |
1585 | for (i = 0; i < nuuids; i++) { |
1586 | const u32 *uuid = (const u32 *)&uuids[i]; |
1587 | |
		if (uuid_is_null(&uuids[i])) {
			/*
			 * Map null UUID to the empty (all ones) entries
			 * for ICM.
			 */
1593 | request.acl[i].uuid_lo = 0xffffffff; |
1594 | request.acl[i].uuid_hi = 0xffffffff; |
1595 | } else { |
			/* Two high DWs need to be set to all ones */
1597 | if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff) |
1598 | return -EINVAL; |
1599 | |
1600 | request.acl[i].uuid_lo = uuid[0]; |
1601 | request.acl[i].uuid_hi = uuid[1]; |
1602 | } |
1603 | } |
1604 | |
1605 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
1608 | if (ret) |
1609 | return ret; |
1610 | |
1611 | if (reply.hdr.flags & ICM_FLAGS_ERROR) |
1612 | return -EIO; |
1613 | |
1614 | return 0; |
1615 | } |
1616 | |
1617 | static int |
1618 | icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, |
1619 | u8 *proto_version, size_t *nboot_acl, bool *rpm) |
1620 | { |
1621 | struct icm_tr_pkg_driver_ready_response reply; |
1622 | struct icm_pkg_driver_ready request = { |
1623 | .hdr.code = ICM_DRIVER_READY, |
1624 | }; |
1625 | int ret; |
1626 | |
1627 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, 20000);
1630 | if (ret) |
1631 | return ret; |
1632 | |
1633 | if (proto_version) |
1634 | *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >> |
1635 | ICM_TR_INFO_PROTO_VERSION_SHIFT; |
1636 | |
1637 | /* Ice Lake always supports RTD3 */ |
1638 | if (rpm) |
1639 | *rpm = true; |
1640 | |
1641 | return 0; |
1642 | } |
1643 | |
1644 | static void icm_icl_set_uuid(struct tb *tb) |
1645 | { |
1646 | struct tb_nhi *nhi = tb->nhi; |
1647 | u32 uuid[4]; |
1648 | |
	pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]);
	pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]);
1651 | uuid[2] = 0xffffffff; |
1652 | uuid[3] = 0xffffffff; |
1653 | |
	tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
1655 | } |
1656 | |
1657 | static void |
1658 | icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) |
1659 | { |
	__icm_tr_device_connected(tb, hdr, true);
1661 | } |
1662 | |
1663 | static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr) |
1664 | { |
1665 | const struct icm_icl_event_rtd3_veto *pkg = |
1666 | (const struct icm_icl_event_rtd3_veto *)hdr; |
1667 | |
1668 | tb_dbg(tb, "ICM rtd3 veto=0x%08x\n" , pkg->veto_reason); |
1669 | |
1670 | if (pkg->veto_reason) |
1671 | icm_veto_begin(tb); |
1672 | else |
1673 | icm_veto_end(tb); |
1674 | } |
1675 | |
1676 | static bool icm_tgl_is_supported(struct tb *tb) |
1677 | { |
	unsigned long end = jiffies + msecs_to_jiffies(10);
1679 | |
1680 | do { |
1681 | u32 val; |
1682 | |
1683 | val = ioread32(tb->nhi->iobase + REG_FW_STS); |
1684 | if (val & REG_FW_STS_NVM_AUTH_DONE) |
1685 | return true; |
		usleep_range(100, 500);
1687 | } while (time_before(jiffies, end)); |
1688 | |
1689 | return false; |
1690 | } |
1691 | |
1692 | static void icm_handle_notification(struct work_struct *work) |
1693 | { |
1694 | struct icm_notification *n = container_of(work, typeof(*n), work); |
1695 | struct tb *tb = n->tb; |
1696 | struct icm *icm = tb_priv(tb); |
1697 | |
1698 | mutex_lock(&tb->lock); |
1699 | |
1700 | /* |
1701 | * When the domain is stopped we flush its workqueue but before |
1702 | * that the root switch is removed. In that case we should treat |
1703 | * the queued events as being canceled. |
1704 | */ |
1705 | if (tb->root_switch) { |
1706 | switch (n->pkg->code) { |
1707 | case ICM_EVENT_DEVICE_CONNECTED: |
1708 | icm->device_connected(tb, n->pkg); |
1709 | break; |
1710 | case ICM_EVENT_DEVICE_DISCONNECTED: |
1711 | icm->device_disconnected(tb, n->pkg); |
1712 | break; |
1713 | case ICM_EVENT_XDOMAIN_CONNECTED: |
1714 | if (tb_is_xdomain_enabled()) |
1715 | icm->xdomain_connected(tb, n->pkg); |
1716 | break; |
1717 | case ICM_EVENT_XDOMAIN_DISCONNECTED: |
1718 | if (tb_is_xdomain_enabled()) |
1719 | icm->xdomain_disconnected(tb, n->pkg); |
1720 | break; |
1721 | case ICM_EVENT_RTD3_VETO: |
1722 | icm->rtd3_veto(tb, n->pkg); |
1723 | break; |
1724 | } |
1725 | } |
1726 | |
	mutex_unlock(&tb->lock);

	kfree(n->pkg);
	kfree(n);
1731 | } |
1732 | |
1733 | static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, |
1734 | const void *buf, size_t size) |
1735 | { |
1736 | struct icm_notification *n; |
1737 | |
	n = kmalloc(sizeof(*n), GFP_KERNEL);
1739 | if (!n) |
1740 | return; |
1741 | |
	n->pkg = kmemdup(buf, size, GFP_KERNEL);
	if (!n->pkg) {
		kfree(n);
1745 | return; |
1746 | } |
1747 | |
1748 | INIT_WORK(&n->work, icm_handle_notification); |
1749 | n->tb = tb; |
1750 | |
	queue_work(tb->wq, &n->work);
1752 | } |
1753 | |
1754 | static int |
1755 | __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level, |
1756 | u8 *proto_version, size_t *nboot_acl, bool *rpm) |
1757 | { |
1758 | struct icm *icm = tb_priv(tb); |
1759 | unsigned int retries = 50; |
1760 | int ret; |
1761 | |
1762 | ret = icm->driver_ready(tb, security_level, proto_version, nboot_acl, |
1763 | rpm); |
1764 | if (ret) { |
1765 | tb_err(tb, "failed to send driver ready to ICM\n" ); |
1766 | return ret; |
1767 | } |
1768 | |
1769 | /* |
1770 | * Hold on here until the switch config space is accessible so |
1771 | * that we can read root switch config successfully. |
1772 | */ |
1773 | do { |
1774 | struct tb_cfg_result res; |
1775 | u32 tmp; |
1776 | |
		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
				      0, 1, 100);
1779 | if (!res.err) |
1780 | return 0; |
1781 | |
		msleep(50);
1783 | } while (--retries); |
1784 | |
1785 | tb_err(tb, "failed to read root switch config space, giving up\n" ); |
1786 | return -ETIMEDOUT; |
1787 | } |
1788 | |
1789 | static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi) |
1790 | { |
1791 | struct icm *icm = tb_priv(tb); |
1792 | u32 val; |
1793 | |
1794 | if (!icm->upstream_port) |
1795 | return -ENODEV; |
1796 | |
1797 | /* Put ARC to wait for CIO reset event to happen */ |
1798 | val = ioread32(nhi->iobase + REG_FW_STS); |
1799 | val |= REG_FW_STS_CIO_RESET_REQ; |
1800 | iowrite32(val, nhi->iobase + REG_FW_STS); |
1801 | |
1802 | /* Re-start ARC */ |
1803 | val = ioread32(nhi->iobase + REG_FW_STS); |
1804 | val |= REG_FW_STS_ICM_EN_INVERT; |
1805 | val |= REG_FW_STS_ICM_EN_CPU; |
1806 | iowrite32(val, nhi->iobase + REG_FW_STS); |
1807 | |
1808 | /* Trigger CIO reset now */ |
1809 | return icm->cio_reset(tb); |
1810 | } |
1811 | |
1812 | static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi) |
1813 | { |
1814 | unsigned int retries = 10; |
1815 | int ret; |
1816 | u32 val; |
1817 | |
1818 | /* Check if the ICM firmware is already running */ |
1819 | if (icm_firmware_running(nhi)) |
1820 | return 0; |
1821 | |
	dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
1823 | |
1824 | ret = icm_firmware_reset(tb, nhi); |
1825 | if (ret) |
1826 | return ret; |
1827 | |
1828 | /* Wait until the ICM firmware tells us it is up and running */ |
1829 | do { |
1830 | /* Check that the ICM firmware is running */ |
1831 | val = ioread32(nhi->iobase + REG_FW_STS); |
1832 | if (val & REG_FW_STS_NVM_AUTH_DONE) |
1833 | return 0; |
1834 | |
		msleep(300);
1836 | } while (--retries); |
1837 | |
1838 | return -ETIMEDOUT; |
1839 | } |
1840 | |
1841 | static int icm_reset_phy_port(struct tb *tb, int phy_port) |
1842 | { |
1843 | struct icm *icm = tb_priv(tb); |
1844 | u32 state0, state1; |
1845 | int port0, port1; |
1846 | u32 val0, val1; |
1847 | int ret; |
1848 | |
1849 | if (!icm->upstream_port) |
1850 | return 0; |
1851 | |
1852 | if (phy_port) { |
1853 | port0 = 3; |
1854 | port1 = 4; |
1855 | } else { |
1856 | port0 = 1; |
1857 | port1 = 2; |
1858 | } |
1859 | |
1860 | /* |
1861 | * Read link status of both null ports belonging to a single |
1862 | * physical port. |
1863 | */ |
	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1868 | if (ret) |
1869 | return ret; |
1870 | |
1871 | state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK; |
1872 | state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; |
1873 | state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK; |
1874 | state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; |
1875 | |
1876 | /* If they are both up we need to reset them now */ |
1877 | if (state0 != TB_PORT_UP || state1 != TB_PORT_UP) |
1878 | return 0; |
1879 | |
1880 | val0 |= PHY_PORT_CS1_LINK_DISABLE; |
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1882 | if (ret) |
1883 | return ret; |
1884 | |
1885 | val1 |= PHY_PORT_CS1_LINK_DISABLE; |
	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1887 | if (ret) |
1888 | return ret; |
1889 | |
1890 | /* Wait a bit and then re-enable both ports */ |
	usleep_range(10, 100);
1892 | |
	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1897 | if (ret) |
1898 | return ret; |
1899 | |
1900 | val0 &= ~PHY_PORT_CS1_LINK_DISABLE; |
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1902 | if (ret) |
1903 | return ret; |
1904 | |
1905 | val1 &= ~PHY_PORT_CS1_LINK_DISABLE; |
	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1907 | } |
1908 | |
1909 | static int icm_firmware_init(struct tb *tb) |
1910 | { |
1911 | struct icm *icm = tb_priv(tb); |
1912 | struct tb_nhi *nhi = tb->nhi; |
1913 | int ret; |
1914 | |
1915 | ret = icm_firmware_start(tb, nhi); |
1916 | if (ret) { |
		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
1918 | return ret; |
1919 | } |
1920 | |
1921 | if (icm->get_mode) { |
1922 | ret = icm->get_mode(tb); |
1923 | |
1924 | switch (ret) { |
1925 | case NHI_FW_SAFE_MODE: |
1926 | icm->safe_mode = true; |
1927 | break; |
1928 | |
1929 | case NHI_FW_CM_MODE: |
1930 | /* Ask ICM to accept all Thunderbolt devices */ |
			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
1932 | break; |
1933 | |
1934 | default: |
1935 | if (ret < 0) |
1936 | return ret; |
1937 | |
1938 | tb_err(tb, "ICM firmware is in wrong mode: %u\n" , ret); |
1939 | return -ENODEV; |
1940 | } |
1941 | } |
1942 | |
1943 | /* |
1944 | * Reset both physical ports if there is anything connected to |
1945 | * them already. |
1946 | */ |
	ret = icm_reset_phy_port(tb, 0);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
	ret = icm_reset_phy_port(tb, 1);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
1953 | |
1954 | return 0; |
1955 | } |
1956 | |
1957 | static int icm_driver_ready(struct tb *tb) |
1958 | { |
1959 | struct icm *icm = tb_priv(tb); |
1960 | int ret; |
1961 | |
1962 | ret = icm_firmware_init(tb); |
1963 | if (ret) |
1964 | return ret; |
1965 | |
1966 | if (icm->safe_mode) { |
1967 | tb_info(tb, "Thunderbolt host controller is in safe mode.\n" ); |
1968 | tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n" ); |
1969 | tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n" ); |
1970 | return 0; |
1971 | } |
1972 | |
	ret = __icm_driver_ready(tb, &tb->security_level, &icm->proto_version,
				 &tb->nboot_acl, &icm->rpm);
1975 | if (ret) |
1976 | return ret; |
1977 | |
1978 | /* |
1979 | * Make sure the number of supported preboot ACL matches what we |
1980 | * expect or disable the whole feature. |
1981 | */ |
1982 | if (tb->nboot_acl > icm->max_boot_acl) |
1983 | tb->nboot_acl = 0; |
1984 | |
1985 | if (icm->proto_version >= 3) |
1986 | tb_dbg(tb, "USB4 proxy operations supported\n" ); |
1987 | |
1988 | return 0; |
1989 | } |
1990 | |
1991 | static int icm_suspend(struct tb *tb) |
1992 | { |
1993 | struct icm *icm = tb_priv(tb); |
1994 | |
1995 | if (icm->save_devices) |
1996 | icm->save_devices(tb); |
1997 | |
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
1999 | return 0; |
2000 | } |
2001 | |
2002 | /* |
2003 | * Mark all switches (except root switch) below this one unplugged. ICM |
2004 | * firmware will send us an updated list of switches after we have send |
2005 | * it driver ready command. If a switch is not in that list it will be |
2006 | * removed when we perform rescan. |
2007 | */ |
2008 | static void icm_unplug_children(struct tb_switch *sw) |
2009 | { |
2010 | struct tb_port *port; |
2011 | |
2012 | if (tb_route(sw)) |
2013 | sw->is_unplugged = true; |
2014 | |
2015 | tb_switch_for_each_port(sw, port) { |
2016 | if (port->xdomain) |
2017 | port->xdomain->is_unplugged = true; |
2018 | else if (tb_port_has_remote(port)) |
			icm_unplug_children(port->remote->sw);
2020 | } |
2021 | } |
2022 | |
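/*
 * bus_for_each_dev() callback: complete @rpm_complete of a switch so
 * that pm_runtime_get_sync() called from tb_switch_remove() does not
 * block waiting for it (see remove_unplugged_switch() below).
 */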
2023 | static int complete_rpm(struct device *dev, void *data) |
2024 | { |
2025 | struct tb_switch *sw = tb_to_switch(dev); |
2026 | |
2027 | if (sw) |
2028 | complete(&sw->rpm_complete); |
2029 | return 0; |
2030 | } |
2031 | |
2032 | static void remove_unplugged_switch(struct tb_switch *sw) |
2033 | { |
	struct device *parent = get_device(sw->dev.parent);

	pm_runtime_get_sync(parent);
2037 | |
2038 | /* |
2039 | * Signal this and switches below for rpm_complete because |
2040 | * tb_switch_remove() calls pm_runtime_get_sync() that then waits |
2041 | * for it. |
2042 | */ |
	complete_rpm(&sw->dev, NULL);
	bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
2045 | tb_switch_remove(sw); |
2046 | |
	pm_runtime_mark_last_busy(parent);
	pm_runtime_put_autosuspend(parent);

	put_device(parent);
2051 | } |
2052 | |
2053 | static void icm_free_unplugged_children(struct tb_switch *sw) |
2054 | { |
2055 | struct tb_port *port; |
2056 | |
2057 | tb_switch_for_each_port(sw, port) { |
2058 | if (port->xdomain && port->xdomain->is_unplugged) { |
			tb_xdomain_remove(port->xdomain);
2060 | port->xdomain = NULL; |
2061 | } else if (tb_port_has_remote(port)) { |
2062 | if (port->remote->sw->is_unplugged) { |
				remove_unplugged_switch(port->remote->sw);
2064 | port->remote = NULL; |
2065 | } else { |
				icm_free_unplugged_children(port->remote->sw);
2067 | } |
2068 | } |
2069 | } |
2070 | } |
2071 | |
2072 | static void icm_rescan_work(struct work_struct *work) |
2073 | { |
2074 | struct icm *icm = container_of(work, struct icm, rescan_work.work); |
2075 | struct tb *tb = icm_to_tb(icm); |
2076 | |
2077 | mutex_lock(&tb->lock); |
2078 | if (tb->root_switch) |
		icm_free_unplugged_children(tb->root_switch);
	mutex_unlock(&tb->lock);
2081 | } |
2082 | |
2083 | static void icm_complete(struct tb *tb) |
2084 | { |
2085 | struct icm *icm = tb_priv(tb); |
2086 | |
2087 | if (tb->nhi->going_away) |
2088 | return; |
2089 | |
2090 | /* |
2091 | * If RTD3 was vetoed before we entered system suspend allow it |
2092 | * again now before driver ready is sent. Firmware sends a new RTD3 |
2093 | * veto if it is still the case after we have sent it driver ready |
2094 | * command. |
2095 | */ |
2096 | icm_veto_end(tb); |
	icm_unplug_children(tb->root_switch);
2098 | |
2099 | /* |
2100 | * Now all existing children should be resumed, start events |
2101 | * from ICM to get updated status. |
2102 | */ |
2103 | __icm_driver_ready(tb, NULL, NULL, NULL, NULL); |
2104 | |
2105 | /* |
2106 | * We do not get notifications of devices that have been |
2107 | * unplugged during suspend so schedule rescan to clean them up |
2108 | * if any. |
2109 | */ |
	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
2111 | } |
2112 | |
2113 | static int icm_runtime_suspend(struct tb *tb) |
2114 | { |
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
2116 | return 0; |
2117 | } |
2118 | |
2119 | static int icm_runtime_suspend_switch(struct tb_switch *sw) |
2120 | { |
2121 | if (tb_route(sw)) |
		reinit_completion(&sw->rpm_complete);
2123 | return 0; |
2124 | } |
2125 | |
2126 | static int icm_runtime_resume_switch(struct tb_switch *sw) |
2127 | { |
2128 | if (tb_route(sw)) { |
		if (!wait_for_completion_timeout(&sw->rpm_complete,
						 msecs_to_jiffies(500))) {
			dev_dbg(&sw->dev, "runtime resuming timed out\n");
2132 | } |
2133 | } |
2134 | return 0; |
2135 | } |
2136 | |
2137 | static int icm_runtime_resume(struct tb *tb) |
2138 | { |
2139 | /* |
2140 | * We can reuse the same resume functionality than with system |
2141 | * suspend. |
2142 | */ |
2143 | icm_complete(tb); |
2144 | return 0; |
2145 | } |
2146 | |
2147 | static int icm_start(struct tb *tb) |
2148 | { |
2149 | struct icm *icm = tb_priv(tb); |
2150 | int ret; |
2151 | |
2152 | if (icm->safe_mode) |
		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
	else
		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);
2158 | |
2159 | tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm; |
2160 | tb->root_switch->rpm = icm->rpm; |
2161 | |
2162 | if (icm->set_uuid) |
2163 | icm->set_uuid(tb); |
2164 | |
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
2168 | tb->root_switch = NULL; |
2169 | } |
2170 | |
2171 | return ret; |
2172 | } |
2173 | |
2174 | static void icm_stop(struct tb *tb) |
2175 | { |
2176 | struct icm *icm = tb_priv(tb); |
2177 | |
	cancel_delayed_work(&icm->rescan_work);
	tb_switch_remove(tb->root_switch);
	tb->root_switch = NULL;
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
	kfree(icm->last_nvm_auth);
2183 | icm->last_nvm_auth = NULL; |
2184 | } |
2185 | |
2186 | static int icm_disconnect_pcie_paths(struct tb *tb) |
2187 | { |
	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
2189 | } |
2190 | |
2191 | static void icm_usb4_switch_nvm_auth_complete(void *data) |
2192 | { |
2193 | struct usb4_switch_nvm_auth *auth = data; |
2194 | struct icm *icm = auth->icm; |
2195 | struct tb *tb = icm_to_tb(icm); |
2196 | |
2197 | tb_dbg(tb, "NVM_AUTH response for %llx flags %#x status %#x\n" , |
2198 | get_route(auth->reply.route_hi, auth->reply.route_lo), |
2199 | auth->reply.hdr.flags, auth->reply.status); |
2200 | |
2201 | mutex_lock(&tb->lock); |
2202 | if (WARN_ON(icm->last_nvm_auth)) |
		kfree(icm->last_nvm_auth);
2204 | icm->last_nvm_auth = auth; |
	mutex_unlock(&tb->lock);
2206 | } |
2207 | |
2208 | static int icm_usb4_switch_nvm_authenticate(struct tb *tb, u64 route) |
2209 | { |
2210 | struct usb4_switch_nvm_auth *auth; |
2211 | struct icm *icm = tb_priv(tb); |
2212 | struct tb_cfg_request *req; |
2213 | int ret; |
2214 | |
	auth = kzalloc(sizeof(*auth), GFP_KERNEL);
2216 | if (!auth) |
2217 | return -ENOMEM; |
2218 | |
2219 | auth->icm = icm; |
2220 | auth->request.hdr.code = ICM_USB4_SWITCH_OP; |
2221 | auth->request.route_hi = upper_32_bits(route); |
2222 | auth->request.route_lo = lower_32_bits(route); |
2223 | auth->request.opcode = USB4_SWITCH_OP_NVM_AUTH; |
2224 | |
2225 | req = tb_cfg_request_alloc(); |
2226 | if (!req) { |
2227 | ret = -ENOMEM; |
2228 | goto err_free_auth; |
2229 | } |
2230 | |
2231 | req->match = icm_match; |
2232 | req->copy = icm_copy; |
2233 | req->request = &auth->request; |
2234 | req->request_size = sizeof(auth->request); |
2235 | req->request_type = TB_CFG_PKG_ICM_CMD; |
2236 | req->response = &auth->reply; |
2237 | req->npackets = 1; |
2238 | req->response_size = sizeof(auth->reply); |
2239 | req->response_type = TB_CFG_PKG_ICM_RESP; |
2240 | |
2241 | tb_dbg(tb, "NVM_AUTH request for %llx\n" , route); |
2242 | |
2243 | mutex_lock(&icm->request_lock); |
	ret = tb_cfg_request(tb->ctl, req, icm_usb4_switch_nvm_auth_complete,
			     auth);
	mutex_unlock(&icm->request_lock);
2247 | |
2248 | tb_cfg_request_put(req); |
2249 | if (ret) |
2250 | goto err_free_auth; |
2251 | return 0; |
2252 | |
2253 | err_free_auth: |
	kfree(auth);
2255 | return ret; |
2256 | } |
2257 | |
2258 | static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, |
2259 | u8 *status, const void *tx_data, size_t tx_data_len, |
2260 | void *rx_data, size_t rx_data_len) |
2261 | { |
2262 | struct icm_usb4_switch_op_response reply; |
2263 | struct icm_usb4_switch_op request; |
2264 | struct tb *tb = sw->tb; |
2265 | struct icm *icm = tb_priv(tb); |
2266 | u64 route = tb_route(sw); |
2267 | int ret; |
2268 | |
2269 | /* |
2270 | * USB4 router operation proxy is supported in firmware if the |
2271 | * protocol version is 3 or higher. |
2272 | */ |
2273 | if (icm->proto_version < 3) |
2274 | return -EOPNOTSUPP; |
2275 | |
2276 | /* |
2277 | * NVM_AUTH is a special USB4 proxy operation that does not |
2278 | * return immediately so handle it separately. |
2279 | */ |
2280 | if (opcode == USB4_SWITCH_OP_NVM_AUTH) |
2281 | return icm_usb4_switch_nvm_authenticate(tb, route); |
2282 | |
2283 | memset(&request, 0, sizeof(request)); |
2284 | request.hdr.code = ICM_USB4_SWITCH_OP; |
2285 | request.route_hi = upper_32_bits(route); |
2286 | request.route_lo = lower_32_bits(route); |
2287 | request.opcode = opcode; |
2288 | if (metadata) |
2289 | request.metadata = *metadata; |
2290 | |
2291 | if (tx_data_len) { |
2292 | request.data_len_valid |= ICM_USB4_SWITCH_DATA_VALID; |
2293 | if (tx_data_len < ARRAY_SIZE(request.data)) |
			request.data_len_valid |=
				tx_data_len & ICM_USB4_SWITCH_DATA_LEN_MASK;
2296 | memcpy(request.data, tx_data, tx_data_len * sizeof(u32)); |
2297 | } |
2298 | |
2299 | memset(&reply, 0, sizeof(reply)); |
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
2302 | if (ret) |
2303 | return ret; |
2304 | |
2305 | if (reply.hdr.flags & ICM_FLAGS_ERROR) |
2306 | return -EIO; |
2307 | |
2308 | if (status) |
2309 | *status = reply.status; |
2310 | |
2311 | if (metadata) |
2312 | *metadata = reply.metadata; |
2313 | |
2314 | if (rx_data_len) |
2315 | memcpy(rx_data, reply.data, rx_data_len * sizeof(u32)); |
2316 | |
2317 | return 0; |
2318 | } |
2319 | |
2320 | static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw, |
2321 | u32 *status) |
2322 | { |
2323 | struct usb4_switch_nvm_auth *auth; |
2324 | struct tb *tb = sw->tb; |
2325 | struct icm *icm = tb_priv(tb); |
2326 | int ret = 0; |
2327 | |
2328 | if (icm->proto_version < 3) |
2329 | return -EOPNOTSUPP; |
2330 | |
2331 | auth = icm->last_nvm_auth; |
2332 | icm->last_nvm_auth = NULL; |
2333 | |
2334 | if (auth && auth->reply.route_hi == sw->config.route_hi && |
2335 | auth->reply.route_lo == sw->config.route_lo) { |
2336 | tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n" , |
2337 | tb_route(sw), auth->reply.hdr.flags, auth->reply.status); |
2338 | if (auth->reply.hdr.flags & ICM_FLAGS_ERROR) |
2339 | ret = -EIO; |
2340 | else |
2341 | *status = auth->reply.status; |
2342 | } else { |
2343 | *status = 0; |
2344 | } |
2345 | |
	kfree(auth);
2347 | return ret; |
2348 | } |
2349 | |
2350 | /* Falcon Ridge */ |
2351 | static const struct tb_cm_ops icm_fr_ops = { |
2352 | .driver_ready = icm_driver_ready, |
2353 | .start = icm_start, |
2354 | .stop = icm_stop, |
2355 | .suspend = icm_suspend, |
2356 | .complete = icm_complete, |
2357 | .handle_event = icm_handle_event, |
2358 | .approve_switch = icm_fr_approve_switch, |
2359 | .add_switch_key = icm_fr_add_switch_key, |
2360 | .challenge_switch_key = icm_fr_challenge_switch_key, |
2361 | .disconnect_pcie_paths = icm_disconnect_pcie_paths, |
2362 | .approve_xdomain_paths = icm_fr_approve_xdomain_paths, |
2363 | .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, |
2364 | }; |
2365 | |
2366 | /* Alpine Ridge */ |
2367 | static const struct tb_cm_ops icm_ar_ops = { |
2368 | .driver_ready = icm_driver_ready, |
2369 | .start = icm_start, |
2370 | .stop = icm_stop, |
2371 | .suspend = icm_suspend, |
2372 | .complete = icm_complete, |
2373 | .runtime_suspend = icm_runtime_suspend, |
2374 | .runtime_resume = icm_runtime_resume, |
2375 | .runtime_suspend_switch = icm_runtime_suspend_switch, |
2376 | .runtime_resume_switch = icm_runtime_resume_switch, |
2377 | .handle_event = icm_handle_event, |
2378 | .get_boot_acl = icm_ar_get_boot_acl, |
2379 | .set_boot_acl = icm_ar_set_boot_acl, |
2380 | .approve_switch = icm_fr_approve_switch, |
2381 | .add_switch_key = icm_fr_add_switch_key, |
2382 | .challenge_switch_key = icm_fr_challenge_switch_key, |
2383 | .disconnect_pcie_paths = icm_disconnect_pcie_paths, |
2384 | .approve_xdomain_paths = icm_fr_approve_xdomain_paths, |
2385 | .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, |
2386 | }; |
2387 | |
2388 | /* Titan Ridge */ |
2389 | static const struct tb_cm_ops icm_tr_ops = { |
2390 | .driver_ready = icm_driver_ready, |
2391 | .start = icm_start, |
2392 | .stop = icm_stop, |
2393 | .suspend = icm_suspend, |
2394 | .complete = icm_complete, |
2395 | .runtime_suspend = icm_runtime_suspend, |
2396 | .runtime_resume = icm_runtime_resume, |
2397 | .runtime_suspend_switch = icm_runtime_suspend_switch, |
2398 | .runtime_resume_switch = icm_runtime_resume_switch, |
2399 | .handle_event = icm_handle_event, |
2400 | .get_boot_acl = icm_ar_get_boot_acl, |
2401 | .set_boot_acl = icm_ar_set_boot_acl, |
2402 | .approve_switch = icm_tr_approve_switch, |
2403 | .add_switch_key = icm_tr_add_switch_key, |
2404 | .challenge_switch_key = icm_tr_challenge_switch_key, |
2405 | .disconnect_pcie_paths = icm_disconnect_pcie_paths, |
2406 | .approve_xdomain_paths = icm_tr_approve_xdomain_paths, |
2407 | .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, |
2408 | .usb4_switch_op = icm_usb4_switch_op, |
2409 | .usb4_switch_nvm_authenticate_status = |
2410 | icm_usb4_switch_nvm_authenticate_status, |
2411 | }; |
2412 | |
2413 | /* Ice Lake */ |
2414 | static const struct tb_cm_ops icm_icl_ops = { |
2415 | .driver_ready = icm_driver_ready, |
2416 | .start = icm_start, |
2417 | .stop = icm_stop, |
2418 | .complete = icm_complete, |
2419 | .runtime_suspend = icm_runtime_suspend, |
2420 | .runtime_resume = icm_runtime_resume, |
2421 | .handle_event = icm_handle_event, |
2422 | .approve_xdomain_paths = icm_tr_approve_xdomain_paths, |
2423 | .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, |
2424 | .usb4_switch_op = icm_usb4_switch_op, |
2425 | .usb4_switch_nvm_authenticate_status = |
2426 | icm_usb4_switch_nvm_authenticate_status, |
2427 | }; |
2428 | |
2429 | struct tb *icm_probe(struct tb_nhi *nhi) |
2430 | { |
2431 | struct icm *icm; |
2432 | struct tb *tb; |
2433 | |
	tb = tb_domain_alloc(nhi, ICM_TIMEOUT, sizeof(struct icm));
2435 | if (!tb) |
2436 | return NULL; |
2437 | |
2438 | icm = tb_priv(tb); |
2439 | INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work); |
2440 | mutex_init(&icm->request_lock); |
2441 | |
2442 | switch (nhi->pdev->device) { |
2443 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: |
2444 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: |
2445 | icm->can_upgrade_nvm = true; |
2446 | icm->is_supported = icm_fr_is_supported; |
2447 | icm->get_route = icm_fr_get_route; |
2448 | icm->save_devices = icm_fr_save_devices; |
2449 | icm->driver_ready = icm_fr_driver_ready; |
2450 | icm->device_connected = icm_fr_device_connected; |
2451 | icm->device_disconnected = icm_fr_device_disconnected; |
2452 | icm->xdomain_connected = icm_fr_xdomain_connected; |
2453 | icm->xdomain_disconnected = icm_fr_xdomain_disconnected; |
2454 | tb->cm_ops = &icm_fr_ops; |
2455 | break; |
2456 | |
2457 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI: |
2458 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI: |
2459 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI: |
2460 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI: |
2461 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI: |
2462 | icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; |
2463 | /* |
2464 | * NVM upgrade has not been tested on Apple systems and |
2465 | * they don't provide images publicly either. To be on |
2466 | * the safe side prevent root switch NVM upgrade on Macs |
2467 | * for now. |
2468 | */ |
2469 | icm->can_upgrade_nvm = !x86_apple_machine; |
2470 | icm->is_supported = icm_ar_is_supported; |
2471 | icm->cio_reset = icm_ar_cio_reset; |
2472 | icm->get_mode = icm_ar_get_mode; |
2473 | icm->get_route = icm_ar_get_route; |
2474 | icm->save_devices = icm_fr_save_devices; |
2475 | icm->driver_ready = icm_ar_driver_ready; |
2476 | icm->device_connected = icm_fr_device_connected; |
2477 | icm->device_disconnected = icm_fr_device_disconnected; |
2478 | icm->xdomain_connected = icm_fr_xdomain_connected; |
2479 | icm->xdomain_disconnected = icm_fr_xdomain_disconnected; |
2480 | tb->cm_ops = &icm_ar_ops; |
2481 | break; |
2482 | |
2483 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI: |
2484 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI: |
2485 | icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; |
2486 | icm->can_upgrade_nvm = !x86_apple_machine; |
2487 | icm->is_supported = icm_ar_is_supported; |
2488 | icm->cio_reset = icm_tr_cio_reset; |
2489 | icm->get_mode = icm_ar_get_mode; |
2490 | icm->driver_ready = icm_tr_driver_ready; |
2491 | icm->device_connected = icm_tr_device_connected; |
2492 | icm->device_disconnected = icm_tr_device_disconnected; |
2493 | icm->xdomain_connected = icm_tr_xdomain_connected; |
2494 | icm->xdomain_disconnected = icm_tr_xdomain_disconnected; |
2495 | tb->cm_ops = &icm_tr_ops; |
2496 | break; |
2497 | |
2498 | case PCI_DEVICE_ID_INTEL_ICL_NHI0: |
2499 | case PCI_DEVICE_ID_INTEL_ICL_NHI1: |
2500 | icm->is_supported = icm_fr_is_supported; |
2501 | icm->driver_ready = icm_icl_driver_ready; |
2502 | icm->set_uuid = icm_icl_set_uuid; |
2503 | icm->device_connected = icm_icl_device_connected; |
2504 | icm->device_disconnected = icm_tr_device_disconnected; |
2505 | icm->xdomain_connected = icm_tr_xdomain_connected; |
2506 | icm->xdomain_disconnected = icm_tr_xdomain_disconnected; |
2507 | icm->rtd3_veto = icm_icl_rtd3_veto; |
2508 | tb->cm_ops = &icm_icl_ops; |
2509 | break; |
2510 | |
2511 | case PCI_DEVICE_ID_INTEL_TGL_NHI0: |
2512 | case PCI_DEVICE_ID_INTEL_TGL_NHI1: |
2513 | case PCI_DEVICE_ID_INTEL_TGL_H_NHI0: |
2514 | case PCI_DEVICE_ID_INTEL_TGL_H_NHI1: |
2515 | case PCI_DEVICE_ID_INTEL_ADL_NHI0: |
2516 | case PCI_DEVICE_ID_INTEL_ADL_NHI1: |
2517 | case PCI_DEVICE_ID_INTEL_RPL_NHI0: |
2518 | case PCI_DEVICE_ID_INTEL_RPL_NHI1: |
2519 | case PCI_DEVICE_ID_INTEL_MTL_M_NHI0: |
2520 | case PCI_DEVICE_ID_INTEL_MTL_P_NHI0: |
2521 | case PCI_DEVICE_ID_INTEL_MTL_P_NHI1: |
2522 | icm->is_supported = icm_tgl_is_supported; |
2523 | icm->driver_ready = icm_icl_driver_ready; |
2524 | icm->set_uuid = icm_icl_set_uuid; |
2525 | icm->device_connected = icm_icl_device_connected; |
2526 | icm->device_disconnected = icm_tr_device_disconnected; |
2527 | icm->xdomain_connected = icm_tr_xdomain_connected; |
2528 | icm->xdomain_disconnected = icm_tr_xdomain_disconnected; |
2529 | icm->rtd3_veto = icm_icl_rtd3_veto; |
2530 | tb->cm_ops = &icm_icl_ops; |
2531 | break; |
2532 | |
2533 | case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI: |
2534 | case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI: |
2535 | icm->is_supported = icm_tgl_is_supported; |
2536 | icm->get_mode = icm_ar_get_mode; |
2537 | icm->driver_ready = icm_tr_driver_ready; |
2538 | icm->device_connected = icm_tr_device_connected; |
2539 | icm->device_disconnected = icm_tr_device_disconnected; |
2540 | icm->xdomain_connected = icm_tr_xdomain_connected; |
2541 | icm->xdomain_disconnected = icm_tr_xdomain_disconnected; |
2542 | tb->cm_ops = &icm_tr_ops; |
2543 | break; |
2544 | } |
2545 | |
2546 | if (!icm->is_supported || !icm->is_supported(tb)) { |
		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
2548 | tb_domain_put(tb); |
2549 | return NULL; |
2550 | } |
2551 | |
2552 | tb_dbg(tb, "using firmware connection manager\n" ); |
2553 | |
2554 | return tb; |
2555 | } |
2556 | |