1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * xhci-dbgtty.c - tty glue for xHCI debug capability |
4 | * |
5 | * Copyright (C) 2017 Intel Corporation |
6 | * |
7 | * Author: Lu Baolu <baolu.lu@linux.intel.com> |
8 | */ |
9 | |
10 | #include <linux/slab.h> |
11 | #include <linux/tty.h> |
12 | #include <linux/tty_flip.h> |
13 | #include <linux/idr.h> |
14 | |
15 | #include "xhci.h" |
16 | #include "xhci-dbgcap.h" |
17 | |
/* Single tty driver instance shared by every DbC port in the system. */
static struct tty_driver *dbc_tty_driver;
/* Maps tty minor numbers to their dbc_port; guarded by the mutex below. */
static struct idr dbc_tty_minors;
static DEFINE_MUTEX(dbc_tty_minors_lock);
21 | |
22 | static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc) |
23 | { |
24 | return dbc->priv; |
25 | } |
26 | |
27 | static unsigned int |
28 | dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size) |
29 | { |
30 | unsigned int len; |
31 | |
32 | len = kfifo_len(&port->write_fifo); |
33 | if (len < size) |
34 | size = len; |
35 | if (size != 0) |
36 | size = kfifo_out(&port->write_fifo, packet, size); |
37 | return size; |
38 | } |
39 | |
40 | static int dbc_start_tx(struct dbc_port *port) |
41 | __releases(&port->port_lock) |
42 | __acquires(&port->port_lock) |
43 | { |
44 | int len; |
45 | struct dbc_request *req; |
46 | int status = 0; |
47 | bool do_tty_wake = false; |
48 | struct list_head *pool = &port->write_pool; |
49 | |
50 | while (!list_empty(head: pool)) { |
51 | req = list_entry(pool->next, struct dbc_request, list_pool); |
52 | len = dbc_send_packet(port, packet: req->buf, DBC_MAX_PACKET); |
53 | if (len == 0) |
54 | break; |
55 | do_tty_wake = true; |
56 | |
57 | req->length = len; |
58 | list_del(entry: &req->list_pool); |
59 | |
60 | spin_unlock(lock: &port->port_lock); |
61 | status = dbc_ep_queue(req); |
62 | spin_lock(lock: &port->port_lock); |
63 | |
64 | if (status) { |
65 | list_add(new: &req->list_pool, head: pool); |
66 | break; |
67 | } |
68 | } |
69 | |
70 | if (do_tty_wake && port->port.tty) |
71 | tty_wakeup(tty: port->port.tty); |
72 | |
73 | return status; |
74 | } |
75 | |
76 | static void dbc_start_rx(struct dbc_port *port) |
77 | __releases(&port->port_lock) |
78 | __acquires(&port->port_lock) |
79 | { |
80 | struct dbc_request *req; |
81 | int status; |
82 | struct list_head *pool = &port->read_pool; |
83 | |
84 | while (!list_empty(head: pool)) { |
85 | if (!port->port.tty) |
86 | break; |
87 | |
88 | req = list_entry(pool->next, struct dbc_request, list_pool); |
89 | list_del(entry: &req->list_pool); |
90 | req->length = DBC_MAX_PACKET; |
91 | |
92 | spin_unlock(lock: &port->port_lock); |
93 | status = dbc_ep_queue(req); |
94 | spin_lock(lock: &port->port_lock); |
95 | |
96 | if (status) { |
97 | list_add(new: &req->list_pool, head: pool); |
98 | break; |
99 | } |
100 | } |
101 | } |
102 | |
103 | static void |
104 | dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req) |
105 | { |
106 | unsigned long flags; |
107 | struct dbc_port *port = dbc_to_port(dbc); |
108 | |
109 | spin_lock_irqsave(&port->port_lock, flags); |
110 | list_add_tail(new: &req->list_pool, head: &port->read_queue); |
111 | tasklet_schedule(t: &port->push); |
112 | spin_unlock_irqrestore(lock: &port->port_lock, flags); |
113 | } |
114 | |
115 | static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req) |
116 | { |
117 | unsigned long flags; |
118 | struct dbc_port *port = dbc_to_port(dbc); |
119 | |
120 | spin_lock_irqsave(&port->port_lock, flags); |
121 | list_add(new: &req->list_pool, head: &port->write_pool); |
122 | switch (req->status) { |
123 | case 0: |
124 | dbc_start_tx(port); |
125 | break; |
126 | case -ESHUTDOWN: |
127 | break; |
128 | default: |
129 | dev_warn(dbc->dev, "unexpected write complete status %d\n" , |
130 | req->status); |
131 | break; |
132 | } |
133 | spin_unlock_irqrestore(lock: &port->port_lock, flags); |
134 | } |
135 | |
136 | static void xhci_dbc_free_req(struct dbc_request *req) |
137 | { |
138 | kfree(objp: req->buf); |
139 | dbc_free_request(req); |
140 | } |
141 | |
142 | static int |
143 | xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction, |
144 | struct list_head *head, |
145 | void (*fn)(struct xhci_dbc *, struct dbc_request *)) |
146 | { |
147 | int i; |
148 | struct dbc_request *req; |
149 | |
150 | for (i = 0; i < DBC_QUEUE_SIZE; i++) { |
151 | req = dbc_alloc_request(dbc, direction, GFP_KERNEL); |
152 | if (!req) |
153 | break; |
154 | |
155 | req->length = DBC_MAX_PACKET; |
156 | req->buf = kmalloc(size: req->length, GFP_KERNEL); |
157 | if (!req->buf) { |
158 | dbc_free_request(req); |
159 | break; |
160 | } |
161 | |
162 | req->complete = fn; |
163 | list_add_tail(new: &req->list_pool, head); |
164 | } |
165 | |
166 | return list_empty(head) ? -ENOMEM : 0; |
167 | } |
168 | |
169 | static void |
170 | xhci_dbc_free_requests(struct list_head *head) |
171 | { |
172 | struct dbc_request *req; |
173 | |
174 | while (!list_empty(head)) { |
175 | req = list_entry(head->next, struct dbc_request, list_pool); |
176 | list_del(entry: &req->list_pool); |
177 | xhci_dbc_free_req(req); |
178 | } |
179 | } |
180 | |
181 | static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty) |
182 | { |
183 | struct dbc_port *port; |
184 | |
185 | mutex_lock(&dbc_tty_minors_lock); |
186 | port = idr_find(&dbc_tty_minors, id: tty->index); |
187 | mutex_unlock(lock: &dbc_tty_minors_lock); |
188 | |
189 | if (!port) |
190 | return -ENXIO; |
191 | |
192 | tty->driver_data = port; |
193 | |
194 | return tty_port_install(port: &port->port, driver, tty); |
195 | } |
196 | |
197 | static int dbc_tty_open(struct tty_struct *tty, struct file *file) |
198 | { |
199 | struct dbc_port *port = tty->driver_data; |
200 | |
201 | return tty_port_open(port: &port->port, tty, filp: file); |
202 | } |
203 | |
204 | static void dbc_tty_close(struct tty_struct *tty, struct file *file) |
205 | { |
206 | struct dbc_port *port = tty->driver_data; |
207 | |
208 | tty_port_close(port: &port->port, tty, filp: file); |
209 | } |
210 | |
211 | static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf, |
212 | size_t count) |
213 | { |
214 | struct dbc_port *port = tty->driver_data; |
215 | unsigned long flags; |
216 | |
217 | spin_lock_irqsave(&port->port_lock, flags); |
218 | if (count) |
219 | count = kfifo_in(&port->write_fifo, buf, count); |
220 | dbc_start_tx(port); |
221 | spin_unlock_irqrestore(lock: &port->port_lock, flags); |
222 | |
223 | return count; |
224 | } |
225 | |
226 | static int dbc_tty_put_char(struct tty_struct *tty, u8 ch) |
227 | { |
228 | struct dbc_port *port = tty->driver_data; |
229 | unsigned long flags; |
230 | int status; |
231 | |
232 | spin_lock_irqsave(&port->port_lock, flags); |
233 | status = kfifo_put(&port->write_fifo, ch); |
234 | spin_unlock_irqrestore(lock: &port->port_lock, flags); |
235 | |
236 | return status; |
237 | } |
238 | |
239 | static void dbc_tty_flush_chars(struct tty_struct *tty) |
240 | { |
241 | struct dbc_port *port = tty->driver_data; |
242 | unsigned long flags; |
243 | |
244 | spin_lock_irqsave(&port->port_lock, flags); |
245 | dbc_start_tx(port); |
246 | spin_unlock_irqrestore(lock: &port->port_lock, flags); |
247 | } |
248 | |
249 | static unsigned int dbc_tty_write_room(struct tty_struct *tty) |
250 | { |
251 | struct dbc_port *port = tty->driver_data; |
252 | unsigned long flags; |
253 | unsigned int room; |
254 | |
255 | spin_lock_irqsave(&port->port_lock, flags); |
256 | room = kfifo_avail(&port->write_fifo); |
257 | spin_unlock_irqrestore(lock: &port->port_lock, flags); |
258 | |
259 | return room; |
260 | } |
261 | |
262 | static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty) |
263 | { |
264 | struct dbc_port *port = tty->driver_data; |
265 | unsigned long flags; |
266 | unsigned int chars; |
267 | |
268 | spin_lock_irqsave(&port->port_lock, flags); |
269 | chars = kfifo_len(&port->write_fifo); |
270 | spin_unlock_irqrestore(lock: &port->port_lock, flags); |
271 | |
272 | return chars; |
273 | } |
274 | |
275 | static void dbc_tty_unthrottle(struct tty_struct *tty) |
276 | { |
277 | struct dbc_port *port = tty->driver_data; |
278 | unsigned long flags; |
279 | |
280 | spin_lock_irqsave(&port->port_lock, flags); |
281 | tasklet_schedule(t: &port->push); |
282 | spin_unlock_irqrestore(lock: &port->port_lock, flags); |
283 | } |
284 | |
/* tty layer entry points for the DbC serial device. */
static const struct tty_operations dbc_tty_ops = {
	.install		= dbc_tty_install,
	.open			= dbc_tty_open,
	.close			= dbc_tty_close,
	.write			= dbc_tty_write,
	.put_char		= dbc_tty_put_char,
	.flush_chars		= dbc_tty_flush_chars,
	.write_room		= dbc_tty_write_room,
	.chars_in_buffer	= dbc_tty_chars_in_buffer,
	.unthrottle		= dbc_tty_unthrottle,
};
296 | |
297 | static void dbc_rx_push(struct tasklet_struct *t) |
298 | { |
299 | struct dbc_request *req; |
300 | struct tty_struct *tty; |
301 | unsigned long flags; |
302 | bool do_push = false; |
303 | bool disconnect = false; |
304 | struct dbc_port *port = from_tasklet(port, t, push); |
305 | struct list_head *queue = &port->read_queue; |
306 | |
307 | spin_lock_irqsave(&port->port_lock, flags); |
308 | tty = port->port.tty; |
309 | while (!list_empty(head: queue)) { |
310 | req = list_first_entry(queue, struct dbc_request, list_pool); |
311 | |
312 | if (tty && tty_throttled(tty)) |
313 | break; |
314 | |
315 | switch (req->status) { |
316 | case 0: |
317 | break; |
318 | case -ESHUTDOWN: |
319 | disconnect = true; |
320 | break; |
321 | default: |
322 | pr_warn("ttyDBC0: unexpected RX status %d\n" , |
323 | req->status); |
324 | break; |
325 | } |
326 | |
327 | if (req->actual) { |
328 | char *packet = req->buf; |
329 | unsigned int n, size = req->actual; |
330 | int count; |
331 | |
332 | n = port->n_read; |
333 | if (n) { |
334 | packet += n; |
335 | size -= n; |
336 | } |
337 | |
338 | count = tty_insert_flip_string(port: &port->port, chars: packet, |
339 | size); |
340 | if (count) |
341 | do_push = true; |
342 | if (count != size) { |
343 | port->n_read += count; |
344 | break; |
345 | } |
346 | port->n_read = 0; |
347 | } |
348 | |
349 | list_move(list: &req->list_pool, head: &port->read_pool); |
350 | } |
351 | |
352 | if (do_push) |
353 | tty_flip_buffer_push(port: &port->port); |
354 | |
355 | if (!list_empty(head: queue) && tty) { |
356 | if (!tty_throttled(tty)) { |
357 | if (do_push) |
358 | tasklet_schedule(t: &port->push); |
359 | else |
360 | pr_warn("ttyDBC0: RX not scheduled?\n" ); |
361 | } |
362 | } |
363 | |
364 | if (!disconnect) |
365 | dbc_start_rx(port); |
366 | |
367 | spin_unlock_irqrestore(lock: &port->port_lock, flags); |
368 | } |
369 | |
370 | static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty) |
371 | { |
372 | unsigned long flags; |
373 | struct dbc_port *port = container_of(_port, struct dbc_port, port); |
374 | |
375 | spin_lock_irqsave(&port->port_lock, flags); |
376 | dbc_start_rx(port); |
377 | spin_unlock_irqrestore(lock: &port->port_lock, flags); |
378 | |
379 | return 0; |
380 | } |
381 | |
/* tty_port callbacks; only activation (first open) needs handling. */
static const struct tty_port_operations dbc_port_ops = {
	.activate = dbc_port_activate,
};
385 | |
386 | static void |
387 | xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port) |
388 | { |
389 | tty_port_init(port: &port->port); |
390 | spin_lock_init(&port->port_lock); |
391 | tasklet_setup(t: &port->push, callback: dbc_rx_push); |
392 | INIT_LIST_HEAD(list: &port->read_pool); |
393 | INIT_LIST_HEAD(list: &port->read_queue); |
394 | INIT_LIST_HEAD(list: &port->write_pool); |
395 | |
396 | port->port.ops = &dbc_port_ops; |
397 | port->n_read = 0; |
398 | } |
399 | |
400 | static void |
401 | xhci_dbc_tty_exit_port(struct dbc_port *port) |
402 | { |
403 | tasklet_kill(t: &port->push); |
404 | tty_port_destroy(port: &port->port); |
405 | } |
406 | |
407 | static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc) |
408 | { |
409 | int ret; |
410 | struct device *tty_dev; |
411 | struct dbc_port *port = dbc_to_port(dbc); |
412 | |
413 | if (port->registered) |
414 | return -EBUSY; |
415 | |
416 | xhci_dbc_tty_init_port(dbc, port); |
417 | |
418 | mutex_lock(&dbc_tty_minors_lock); |
419 | port->minor = idr_alloc(&dbc_tty_minors, ptr: port, start: 0, end: 64, GFP_KERNEL); |
420 | mutex_unlock(lock: &dbc_tty_minors_lock); |
421 | |
422 | if (port->minor < 0) { |
423 | ret = port->minor; |
424 | goto err_idr; |
425 | } |
426 | |
427 | ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL); |
428 | if (ret) |
429 | goto err_exit_port; |
430 | |
431 | ret = xhci_dbc_alloc_requests(dbc, BULK_IN, head: &port->read_pool, |
432 | fn: dbc_read_complete); |
433 | if (ret) |
434 | goto err_free_fifo; |
435 | |
436 | ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, head: &port->write_pool, |
437 | fn: dbc_write_complete); |
438 | if (ret) |
439 | goto err_free_requests; |
440 | |
441 | tty_dev = tty_port_register_device(port: &port->port, |
442 | driver: dbc_tty_driver, index: port->minor, NULL); |
443 | if (IS_ERR(ptr: tty_dev)) { |
444 | ret = PTR_ERR(ptr: tty_dev); |
445 | goto err_free_requests; |
446 | } |
447 | |
448 | port->registered = true; |
449 | |
450 | return 0; |
451 | |
452 | err_free_requests: |
453 | xhci_dbc_free_requests(head: &port->read_pool); |
454 | xhci_dbc_free_requests(head: &port->write_pool); |
455 | err_free_fifo: |
456 | kfifo_free(&port->write_fifo); |
457 | err_exit_port: |
458 | idr_remove(&dbc_tty_minors, id: port->minor); |
459 | err_idr: |
460 | xhci_dbc_tty_exit_port(port); |
461 | |
462 | dev_err(dbc->dev, "can't register tty port, err %d\n" , ret); |
463 | |
464 | return ret; |
465 | } |
466 | |
467 | static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc) |
468 | { |
469 | struct dbc_port *port = dbc_to_port(dbc); |
470 | |
471 | if (!port->registered) |
472 | return; |
473 | tty_unregister_device(driver: dbc_tty_driver, index: port->minor); |
474 | xhci_dbc_tty_exit_port(port); |
475 | port->registered = false; |
476 | |
477 | mutex_lock(&dbc_tty_minors_lock); |
478 | idr_remove(&dbc_tty_minors, id: port->minor); |
479 | mutex_unlock(lock: &dbc_tty_minors_lock); |
480 | |
481 | kfifo_free(&port->write_fifo); |
482 | xhci_dbc_free_requests(head: &port->read_pool); |
483 | xhci_dbc_free_requests(head: &port->read_queue); |
484 | xhci_dbc_free_requests(head: &port->write_pool); |
485 | } |
486 | |
/* Callbacks invoked by the DbC core on host connect/disconnect. */
static const struct dbc_driver dbc_driver = {
	.configure		= xhci_dbc_tty_register_device,
	.disconnect		= xhci_dbc_tty_unregister_device,
};
491 | |
492 | int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci) |
493 | { |
494 | struct xhci_dbc *dbc; |
495 | struct dbc_port *port; |
496 | int status; |
497 | |
498 | if (!dbc_tty_driver) |
499 | return -ENODEV; |
500 | |
501 | port = kzalloc(size: sizeof(*port), GFP_KERNEL); |
502 | if (!port) |
503 | return -ENOMEM; |
504 | |
505 | dbc = xhci_alloc_dbc(dev, res: base, driver: &dbc_driver); |
506 | |
507 | if (!dbc) { |
508 | status = -ENOMEM; |
509 | goto out2; |
510 | } |
511 | |
512 | dbc->priv = port; |
513 | |
514 | /* get rid of xhci once this is a real driver binding to a device */ |
515 | xhci->dbc = dbc; |
516 | |
517 | return 0; |
518 | out2: |
519 | kfree(objp: port); |
520 | |
521 | return status; |
522 | } |
523 | |
524 | /* |
525 | * undo what probe did, assume dbc is stopped already. |
526 | * we also assume tty_unregister_device() is called before this |
527 | */ |
/*
 * Undo xhci_dbc_tty_probe().  Assumes the DbC is already stopped and
 * the tty device has been unregistered.
 */
void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
{
	struct dbc_port *port = dbc_to_port(dbc);

	xhci_dbc_remove(dbc);
	kfree(port);
}
535 | |
536 | int dbc_tty_init(void) |
537 | { |
538 | int ret; |
539 | |
540 | idr_init(idr: &dbc_tty_minors); |
541 | |
542 | dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW | |
543 | TTY_DRIVER_DYNAMIC_DEV); |
544 | if (IS_ERR(ptr: dbc_tty_driver)) { |
545 | idr_destroy(&dbc_tty_minors); |
546 | return PTR_ERR(ptr: dbc_tty_driver); |
547 | } |
548 | |
549 | dbc_tty_driver->driver_name = "dbc_serial" ; |
550 | dbc_tty_driver->name = "ttyDBC" ; |
551 | |
552 | dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; |
553 | dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL; |
554 | dbc_tty_driver->init_termios = tty_std_termios; |
555 | dbc_tty_driver->init_termios.c_cflag = |
556 | B9600 | CS8 | CREAD | HUPCL | CLOCAL; |
557 | dbc_tty_driver->init_termios.c_ispeed = 9600; |
558 | dbc_tty_driver->init_termios.c_ospeed = 9600; |
559 | |
560 | tty_set_operations(driver: dbc_tty_driver, op: &dbc_tty_ops); |
561 | |
562 | ret = tty_register_driver(driver: dbc_tty_driver); |
563 | if (ret) { |
564 | pr_err("Can't register dbc tty driver\n" ); |
565 | tty_driver_kref_put(driver: dbc_tty_driver); |
566 | idr_destroy(&dbc_tty_minors); |
567 | } |
568 | |
569 | return ret; |
570 | } |
571 | |
572 | void dbc_tty_exit(void) |
573 | { |
574 | if (dbc_tty_driver) { |
575 | tty_unregister_driver(driver: dbc_tty_driver); |
576 | tty_driver_kref_put(driver: dbc_tty_driver); |
577 | dbc_tty_driver = NULL; |
578 | } |
579 | |
580 | idr_destroy(&dbc_tty_minors); |
581 | } |
582 | |