// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @timeout_msec: Default timeout for non-raw control messages
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	int timeout_msec;
	event_cb callback;
	void *callback_data;
};


#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object so when you are done with it, call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}
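
/*
 * Note on lifetimes: tb_cfg_request() takes its own reference for the
 * duration of the transaction (dropped from tb_cfg_request_work()), so
 * the reference from tb_cfg_request_alloc() stays with the caller, who
 * drops it with tb_cfg_request_put() once the result has been consumed.
 */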

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

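/*
 * Walks the outstanding request queue and returns the first request
 * whose @match callback accepts @pkg. A reference is taken around each
 * candidate so it cannot be released while it is inspected; the
 * reference on the matching request is handed to the caller, who must
 * drop it with tb_cfg_request_put().
 */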
static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req = NULL, *iter;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(iter);
		if (iter->match(iter, pkg)) {
			req = iter;
			break;
		}
		tb_cfg_request_put(iter);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return req;
}

/* utility functions */


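/*
 * Validates that the received frame metadata (size, SOF/EOF package
 * type) and the route in the packet header match what the caller
 * expects. Returns %0 on success and -EIO (with a WARN) on mismatch.
 */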
static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
			"wrong route (expected %llx, got %llx)",
			route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

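/*
 * Decodes a TB_CFG_PKG_ERROR reply into a struct tb_cfg_result. On
 * success res.err is set to %1 to signal a Thunderbolt-level error,
 * with the actual error code in res.tb_error.
 */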
static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/* Port is not connected. This can happen during surprise
		 * removal. Do not warn. */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			    res->response_route, res->response_port);
		return;
	}
}

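/*
 * Control packets carry a CRC32C (Castagnoli) checksum over the packet
 * contents, stored big-endian in the last dword of the frame. The ~0
 * seed and the final inversion follow the usual CRC-32C convention.
 */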
static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}


/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}

/*
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
					       * We ignore failures during stop.
					       * All rx packets are referenced
					       * from ctl->rx_packets, so we do
					       * not lose them.
					       */
}

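/*
 * Returns true if the packet is a notification-style error that is not
 * a reply to an outstanding request and should be forwarded to the
 * event callback instead of request matching.
 */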
static int tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
	case TB_CFG_ERROR_DP_BW:
	case TB_CFG_ERROR_ROP_CMPLT:
	case TB_CFG_ERROR_POP_CMPLT:
	case TB_CFG_ERROR_PCIE_WAKE:
	case TB_CFG_ERROR_DP_CON_CHANGE:
	case TB_CFG_ERROR_DPTX_DISCOVERY:
	case TB_CFG_ERROR_LINK_RECOVERY:
	case TB_CFG_ERROR_ASYM_LINK:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If timeout
 * triggers the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}
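
/*
 * A minimal sketch of synchronous usage; this mirrors what tb_cfg_reset()
 * and the raw config read/write helpers below do:
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *	if (!req)
 *		return -ENOMEM;
 *	req->match = tb_cfg_match;
 *	req->copy = tb_cfg_copy;
 *	req->request = &request;
 *	req->request_size = sizeof(request);
 *	req->request_type = TB_CFG_PKG_RESET;
 *	req->response = &reply;
 *	req->response_size = sizeof(reply);
 *	req->response_type = TB_CFG_PKG_RESET;
 *	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);
 *	tb_cfg_request_put(req);
 */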

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @timeout_msec: Default timeout used with non-raw control messages
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
			    void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;
	ctl->nhi = nhi;
	ctl->timeout_msec = timeout_msec;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}

/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}
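
/*
 * A minimal sketch of the channel lifecycle, per the constraints above
 * (stop before free, and call neither from ctl->callback). my_event_cb
 * and my_data are placeholders, not names used elsewhere in the driver:
 *
 *	ctl = tb_ctl_alloc(nhi, timeout_msec, my_event_cb, my_data);
 *	tb_ctl_start(ctl);
 *	... issue tb_cfg_read()/tb_cfg_write() and friends ...
 *	tb_ctl_stop(ctl);
 *	tb_ctl_free(ctl);
 */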

/* public interface, commands */

/**
 * tb_cfg_ack_notification() - Ack notification
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @error: Pointer to the notification package
 *
 * Call this as response for non-plug notification to ack it. Returns
 * %0 on success or an error code on failure.
 */
int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
			    const struct cfg_error_pkg *error)
{
	struct cfg_ack_pkg pkg = {
		.header = tb_cfg_make_header(route),
	};
	const char *name;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
		name = "link error";
		break;
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
		name = "HEC error";
		break;
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		name = "flow control error";
		break;
	case TB_CFG_ERROR_DP_BW:
		name = "DP_BW";
		break;
	case TB_CFG_ERROR_ROP_CMPLT:
		name = "router operation completion";
		break;
	case TB_CFG_ERROR_POP_CMPLT:
		name = "port operation completion";
		break;
	case TB_CFG_ERROR_PCIE_WAKE:
		name = "PCIe wake";
		break;
	case TB_CFG_ERROR_DP_CON_CHANGE:
		name = "DP connector change";
		break;
	case TB_CFG_ERROR_DPTX_DISCOVERY:
		name = "DPTX discovery";
		break;
	case TB_CFG_ERROR_LINK_RECOVERY:
		name = "link recovery";
		break;
	case TB_CFG_ERROR_ASYM_LINK:
		name = "asymmetric link";
		break;
	default:
		name = "unknown";
		break;
	}

	tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
		   error->error, route);

	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK);
}

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this as response for hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

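/*
 * Default @match implementation: a reply matches an outstanding request
 * when its type, route (with the most significant bit masked off, since
 * replies can have it set) and size all agree; read/write replies must
 * additionally carry the same sequence number as the request.
 */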
static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Route string for the router to send reset
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
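		/* 12-byte header/addr followed by @length dwords of data */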
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}

/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
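		/* 12-byte header/addr followed by @length dwords of data */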
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;

	return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);
	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);
	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   ctl->timeout_msec);
	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}