1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
3
4#include <linux/bitmap.h>
5#include <linux/err.h>
6#include <linux/errno.h>
7#include <linux/debugfs.h>
8#include <linux/fs.h>
9#include <linux/init.h>
10#include <linux/idr.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/poll.h>
14#include <linux/skbuff.h>
15#include <linux/slab.h>
16#include <linux/types.h>
17#include <linux/uaccess.h>
18#include <linux/termios.h>
19#include <linux/wwan.h>
20#include <net/rtnetlink.h>
21#include <uapi/linux/wwan.h>
22
23/* Maximum number of minors in use */
24#define WWAN_MAX_MINORS (1 << MINORBITS)
25
26static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
27static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
28static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
29static const struct class wwan_class = {
30 .name = "wwan",
31};
32static int wwan_major;
33static struct dentry *wwan_debugfs_dir;
34
35#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
36#define to_wwan_port(d) container_of(d, struct wwan_port, dev)
37
38/* WWAN port flags */
39#define WWAN_PORT_TX_OFF 0
40
41/**
42 * struct wwan_device - The structure that defines a WWAN device
43 *
44 * @id: WWAN device unique ID.
45 * @dev: Underlying device.
46 * @port_id: Current available port ID to pick.
47 * @ops: wwan device ops
48 * @ops_ctxt: context to pass to ops
49 * @debugfs_dir: WWAN device debugfs dir
50 */
51struct wwan_device {
52 unsigned int id;
53 struct device dev;
54 atomic_t port_id;
55 const struct wwan_ops *ops;
56 void *ops_ctxt;
57#ifdef CONFIG_WWAN_DEBUGFS
58 struct dentry *debugfs_dir;
59#endif
60};
61
62/**
63 * struct wwan_port - The structure that defines a WWAN port
64 * @type: Port type
65 * @start_count: Port start counter
66 * @flags: Store port state and capabilities
67 * @ops: Pointer to WWAN port operations
68 * @ops_lock: Protect port ops
69 * @dev: Underlying device
70 * @rxq: Buffer inbound queue
71 * @waitqueue: The waitqueue for port fops (read/write/poll)
72 * @data_lock: Port specific data access serialization
73 * @headroom_len: SKB reserved headroom size
74 * @frag_len: Length to fragment packet
75 * @at_data: AT port specific data
76 */
77struct wwan_port {
78 enum wwan_port_type type;
79 unsigned int start_count;
80 unsigned long flags;
81 const struct wwan_port_ops *ops;
82 struct mutex ops_lock; /* Serialize ops + protect against removal */
83 struct device dev;
84 struct sk_buff_head rxq;
85 wait_queue_head_t waitqueue;
86 struct mutex data_lock; /* Port specific data access serialization */
87 size_t headroom_len;
88 size_t frag_len;
89 union {
90 struct {
91 struct ktermios termios;
92 int mdmbits;
93 } at_data;
94 };
95};
96
97static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf)
98{
99 struct wwan_device *wwan = to_wwan_dev(dev);
100
101 return sprintf(buf, fmt: "%d\n", wwan->id);
102}
static DEVICE_ATTR_RO(index);

/* sysfs attributes attached to every WWAN device (index only) */
static struct attribute *wwan_dev_attrs[] = {
	&dev_attr_index.attr,
	NULL,
};
ATTRIBUTE_GROUPS(wwan_dev);
110
111static void wwan_dev_destroy(struct device *dev)
112{
113 struct wwan_device *wwandev = to_wwan_dev(dev);
114
115 ida_free(&wwan_dev_ids, id: wwandev->id);
116 kfree(objp: wwandev);
117}
118
/* Device type of the logical WWAN device; release() frees the container */
static const struct device_type wwan_dev_type = {
	.name = "wwan_dev",
	.release = wwan_dev_destroy,
	.groups = wwan_dev_groups,
};
124
125static int wwan_dev_parent_match(struct device *dev, const void *parent)
126{
127 return (dev->type == &wwan_dev_type &&
128 (dev->parent == parent || dev == parent));
129}
130
131static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
132{
133 struct device *dev;
134
135 dev = class_find_device(class: &wwan_class, NULL, data: parent, match: wwan_dev_parent_match);
136 if (!dev)
137 return ERR_PTR(error: -ENODEV);
138
139 return to_wwan_dev(dev);
140}
141
142static int wwan_dev_name_match(struct device *dev, const void *name)
143{
144 return dev->type == &wwan_dev_type &&
145 strcmp(dev_name(dev), name) == 0;
146}
147
148static struct wwan_device *wwan_dev_get_by_name(const char *name)
149{
150 struct device *dev;
151
152 dev = class_find_device(class: &wwan_class, NULL, data: name, match: wwan_dev_name_match);
153 if (!dev)
154 return ERR_PTR(error: -ENODEV);
155
156 return to_wwan_dev(dev);
157}
158
159#ifdef CONFIG_WWAN_DEBUGFS
160struct dentry *wwan_get_debugfs_dir(struct device *parent)
161{
162 struct wwan_device *wwandev;
163
164 wwandev = wwan_dev_get_by_parent(parent);
165 if (IS_ERR(ptr: wwandev))
166 return ERR_CAST(ptr: wwandev);
167
168 return wwandev->debugfs_dir;
169}
170EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);
171
172static int wwan_dev_debugfs_match(struct device *dev, const void *dir)
173{
174 struct wwan_device *wwandev;
175
176 if (dev->type != &wwan_dev_type)
177 return 0;
178
179 wwandev = to_wwan_dev(dev);
180
181 return wwandev->debugfs_dir == dir;
182}
183
184static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
185{
186 struct device *dev;
187
188 dev = class_find_device(class: &wwan_class, NULL, data: dir, match: wwan_dev_debugfs_match);
189 if (!dev)
190 return ERR_PTR(error: -ENODEV);
191
192 return to_wwan_dev(dev);
193}
194
195void wwan_put_debugfs_dir(struct dentry *dir)
196{
197 struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir);
198
199 if (WARN_ON(IS_ERR(wwandev)))
200 return;
201
202 /* wwan_dev_get_by_debugfs() also got a reference */
203 put_device(dev: &wwandev->dev);
204 put_device(dev: &wwandev->dev);
205}
206EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir);
207#endif
208
209/* This function allocates and registers a new WWAN device OR if a WWAN device
210 * already exist for the given parent, it gets a reference and return it.
211 * This function is not exported (for now), it is called indirectly via
212 * wwan_create_port().
213 */
214static struct wwan_device *wwan_create_dev(struct device *parent)
215{
216 struct wwan_device *wwandev;
217 int err, id;
218
219 /* The 'find-alloc-register' operation must be protected against
220 * concurrent execution, a WWAN device is possibly shared between
221 * multiple callers or concurrently unregistered from wwan_remove_dev().
222 */
223 mutex_lock(&wwan_register_lock);
224
225 /* If wwandev already exists, return it */
226 wwandev = wwan_dev_get_by_parent(parent);
227 if (!IS_ERR(ptr: wwandev))
228 goto done_unlock;
229
230 id = ida_alloc(ida: &wwan_dev_ids, GFP_KERNEL);
231 if (id < 0) {
232 wwandev = ERR_PTR(error: id);
233 goto done_unlock;
234 }
235
236 wwandev = kzalloc(size: sizeof(*wwandev), GFP_KERNEL);
237 if (!wwandev) {
238 wwandev = ERR_PTR(error: -ENOMEM);
239 ida_free(&wwan_dev_ids, id);
240 goto done_unlock;
241 }
242
243 wwandev->dev.parent = parent;
244 wwandev->dev.class = &wwan_class;
245 wwandev->dev.type = &wwan_dev_type;
246 wwandev->id = id;
247 dev_set_name(dev: &wwandev->dev, name: "wwan%d", wwandev->id);
248
249 err = device_register(dev: &wwandev->dev);
250 if (err) {
251 put_device(dev: &wwandev->dev);
252 wwandev = ERR_PTR(error: err);
253 goto done_unlock;
254 }
255
256#ifdef CONFIG_WWAN_DEBUGFS
257 wwandev->debugfs_dir =
258 debugfs_create_dir(name: kobject_name(kobj: &wwandev->dev.kobj),
259 parent: wwan_debugfs_dir);
260#endif
261
262done_unlock:
263 mutex_unlock(lock: &wwan_register_lock);
264
265 return wwandev;
266}
267
268static int is_wwan_child(struct device *dev, void *data)
269{
270 return dev->class == &wwan_class;
271}
272
273static void wwan_remove_dev(struct wwan_device *wwandev)
274{
275 int ret;
276
277 /* Prevent concurrent picking from wwan_create_dev */
278 mutex_lock(&wwan_register_lock);
279
280 /* WWAN device is created and registered (get+add) along with its first
281 * child port, and subsequent port registrations only grab a reference
282 * (get). The WWAN device must then be unregistered (del+put) along with
283 * its last port, and reference simply dropped (put) otherwise. In the
284 * same fashion, we must not unregister it when the ops are still there.
285 */
286 if (wwandev->ops)
287 ret = 1;
288 else
289 ret = device_for_each_child(dev: &wwandev->dev, NULL, fn: is_wwan_child);
290
291 if (!ret) {
292#ifdef CONFIG_WWAN_DEBUGFS
293 debugfs_remove_recursive(dentry: wwandev->debugfs_dir);
294#endif
295 device_unregister(dev: &wwandev->dev);
296 } else {
297 put_device(dev: &wwandev->dev);
298 }
299
300 mutex_unlock(lock: &wwan_register_lock);
301}
302
303/* ------- WWAN port management ------- */
304
/* Indexed by enum wwan_port_type; devsuf builds the chardev name
 * ("wwan<id><devsuf><n>"), name is reported via the sysfs "type" attribute.
 */
static const struct {
	const char * const name;	/* Port type name */
	const char * const devsuf;	/* Port device name suffix */
} wwan_port_types[WWAN_PORT_MAX + 1] = {
	[WWAN_PORT_AT] = {
		.name = "AT",
		.devsuf = "at",
	},
	[WWAN_PORT_MBIM] = {
		.name = "MBIM",
		.devsuf = "mbim",
	},
	[WWAN_PORT_QMI] = {
		.name = "QMI",
		.devsuf = "qmi",
	},
	[WWAN_PORT_QCDM] = {
		.name = "QCDM",
		.devsuf = "qcdm",
	},
	[WWAN_PORT_FIREHOSE] = {
		.name = "FIREHOSE",
		.devsuf = "firehose",
	},
	[WWAN_PORT_XMMRPC] = {
		.name = "XMMRPC",
		.devsuf = "xmmrpc",
	},
	[WWAN_PORT_FASTBOOT] = {
		.name = "FASTBOOT",
		.devsuf = "fastboot",
	},
};
338
339static ssize_t type_show(struct device *dev, struct device_attribute *attr,
340 char *buf)
341{
342 struct wwan_port *port = to_wwan_port(dev);
343
344 return sprintf(buf, fmt: "%s\n", wwan_port_types[port->type].name);
345}
346static DEVICE_ATTR_RO(type);
347
/* sysfs attributes attached to every WWAN port (type only) */
static struct attribute *wwan_port_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};
ATTRIBUTE_GROUPS(wwan_port);
353
354static void wwan_port_destroy(struct device *dev)
355{
356 struct wwan_port *port = to_wwan_port(dev);
357
358 ida_free(&minors, MINOR(port->dev.devt));
359 mutex_destroy(lock: &port->data_lock);
360 mutex_destroy(lock: &port->ops_lock);
361 kfree(objp: port);
362}
363
/* Device type of a WWAN port chardev; release() frees the container */
static const struct device_type wwan_port_dev_type = {
	.name = "wwan_port",
	.release = wwan_port_destroy,
	.groups = wwan_port_groups,
};
369
370static int wwan_port_minor_match(struct device *dev, const void *minor)
371{
372 return (dev->type == &wwan_port_dev_type &&
373 MINOR(dev->devt) == *(unsigned int *)minor);
374}
375
376static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
377{
378 struct device *dev;
379
380 dev = class_find_device(class: &wwan_class, NULL, data: &minor, match: wwan_port_minor_match);
381 if (!dev)
382 return ERR_PTR(error: -ENODEV);
383
384 return to_wwan_port(dev);
385}
386
387/* Allocate and set unique name based on passed format
388 *
389 * Name allocation approach is highly inspired by the __dev_alloc_name()
390 * function.
391 *
392 * To avoid names collision, the caller must prevent the new port device
393 * registration as well as concurrent invocation of this function.
394 */
395static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
396{
397 struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
398 const unsigned int max_ports = PAGE_SIZE * 8;
399 struct class_dev_iter iter;
400 unsigned long *idmap;
401 struct device *dev;
402 char buf[0x20];
403 int id;
404
405 idmap = bitmap_zalloc(nbits: max_ports, GFP_KERNEL);
406 if (!idmap)
407 return -ENOMEM;
408
409 /* Collect ids of same name format ports */
410 class_dev_iter_init(iter: &iter, class: &wwan_class, NULL, type: &wwan_port_dev_type);
411 while ((dev = class_dev_iter_next(iter: &iter))) {
412 if (dev->parent != &wwandev->dev)
413 continue;
414 if (sscanf(dev_name(dev), fmt, &id) != 1)
415 continue;
416 if (id < 0 || id >= max_ports)
417 continue;
418 set_bit(nr: id, addr: idmap);
419 }
420 class_dev_iter_exit(iter: &iter);
421
422 /* Allocate unique id */
423 id = find_first_zero_bit(addr: idmap, size: max_ports);
424 bitmap_free(bitmap: idmap);
425
426 snprintf(buf, size: sizeof(buf), fmt, id); /* Name generation */
427
428 dev = device_find_child_by_name(parent: &wwandev->dev, name: buf);
429 if (dev) {
430 put_device(dev);
431 return -ENFILE;
432 }
433
434 return dev_set_name(dev: &port->dev, name: buf);
435}
436
437struct wwan_port *wwan_create_port(struct device *parent,
438 enum wwan_port_type type,
439 const struct wwan_port_ops *ops,
440 struct wwan_port_caps *caps,
441 void *drvdata)
442{
443 struct wwan_device *wwandev;
444 struct wwan_port *port;
445 char namefmt[0x20];
446 int minor, err;
447
448 if (type > WWAN_PORT_MAX || !ops)
449 return ERR_PTR(error: -EINVAL);
450
451 /* A port is always a child of a WWAN device, retrieve (allocate or
452 * pick) the WWAN device based on the provided parent device.
453 */
454 wwandev = wwan_create_dev(parent);
455 if (IS_ERR(ptr: wwandev))
456 return ERR_CAST(ptr: wwandev);
457
458 /* A port is exposed as character device, get a minor */
459 minor = ida_alloc_range(&minors, min: 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
460 if (minor < 0) {
461 err = minor;
462 goto error_wwandev_remove;
463 }
464
465 port = kzalloc(size: sizeof(*port), GFP_KERNEL);
466 if (!port) {
467 err = -ENOMEM;
468 ida_free(&minors, id: minor);
469 goto error_wwandev_remove;
470 }
471
472 port->type = type;
473 port->ops = ops;
474 port->frag_len = caps ? caps->frag_len : SIZE_MAX;
475 port->headroom_len = caps ? caps->headroom_len : 0;
476 mutex_init(&port->ops_lock);
477 skb_queue_head_init(list: &port->rxq);
478 init_waitqueue_head(&port->waitqueue);
479 mutex_init(&port->data_lock);
480
481 port->dev.parent = &wwandev->dev;
482 port->dev.class = &wwan_class;
483 port->dev.type = &wwan_port_dev_type;
484 port->dev.devt = MKDEV(wwan_major, minor);
485 dev_set_drvdata(dev: &port->dev, data: drvdata);
486
487 /* allocate unique name based on wwan device id, port type and number */
488 snprintf(buf: namefmt, size: sizeof(namefmt), fmt: "wwan%u%s%%d", wwandev->id,
489 wwan_port_types[port->type].devsuf);
490
491 /* Serialize ports registration */
492 mutex_lock(&wwan_register_lock);
493
494 __wwan_port_dev_assign_name(port, fmt: namefmt);
495 err = device_register(dev: &port->dev);
496
497 mutex_unlock(lock: &wwan_register_lock);
498
499 if (err)
500 goto error_put_device;
501
502 dev_info(&wwandev->dev, "port %s attached\n", dev_name(&port->dev));
503 return port;
504
505error_put_device:
506 put_device(dev: &port->dev);
507error_wwandev_remove:
508 wwan_remove_dev(wwandev);
509
510 return ERR_PTR(error: err);
511}
512EXPORT_SYMBOL_GPL(wwan_create_port);
513
514void wwan_remove_port(struct wwan_port *port)
515{
516 struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
517
518 mutex_lock(&port->ops_lock);
519 if (port->start_count)
520 port->ops->stop(port);
521 port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
522 mutex_unlock(lock: &port->ops_lock);
523
524 wake_up_interruptible(&port->waitqueue);
525
526 skb_queue_purge(list: &port->rxq);
527 dev_set_drvdata(dev: &port->dev, NULL);
528
529 dev_info(&wwandev->dev, "port %s disconnected\n", dev_name(&port->dev));
530 device_unregister(dev: &port->dev);
531
532 /* Release related wwan device */
533 wwan_remove_dev(wwandev);
534}
535EXPORT_SYMBOL_GPL(wwan_remove_port);
536
537void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
538{
539 skb_queue_tail(list: &port->rxq, newsk: skb);
540 wake_up_interruptible(&port->waitqueue);
541}
542EXPORT_SYMBOL_GPL(wwan_port_rx);
543
544void wwan_port_txon(struct wwan_port *port)
545{
546 clear_bit(WWAN_PORT_TX_OFF, addr: &port->flags);
547 wake_up_interruptible(&port->waitqueue);
548}
549EXPORT_SYMBOL_GPL(wwan_port_txon);
550
551void wwan_port_txoff(struct wwan_port *port)
552{
553 set_bit(WWAN_PORT_TX_OFF, addr: &port->flags);
554}
555EXPORT_SYMBOL_GPL(wwan_port_txoff);
556
557void *wwan_port_get_drvdata(struct wwan_port *port)
558{
559 return dev_get_drvdata(dev: &port->dev);
560}
561EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);
562
563static int wwan_port_op_start(struct wwan_port *port)
564{
565 int ret = 0;
566
567 mutex_lock(&port->ops_lock);
568 if (!port->ops) { /* Port got unplugged */
569 ret = -ENODEV;
570 goto out_unlock;
571 }
572
573 /* If port is already started, don't start again */
574 if (!port->start_count)
575 ret = port->ops->start(port);
576
577 if (!ret)
578 port->start_count++;
579
580out_unlock:
581 mutex_unlock(lock: &port->ops_lock);
582
583 return ret;
584}
585
586static void wwan_port_op_stop(struct wwan_port *port)
587{
588 mutex_lock(&port->ops_lock);
589 port->start_count--;
590 if (!port->start_count) {
591 if (port->ops)
592 port->ops->stop(port);
593 skb_queue_purge(list: &port->rxq);
594 }
595 mutex_unlock(lock: &port->ops_lock);
596}
597
598static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb,
599 bool nonblock)
600{
601 int ret;
602
603 mutex_lock(&port->ops_lock);
604 if (!port->ops) { /* Port got unplugged */
605 ret = -ENODEV;
606 goto out_unlock;
607 }
608
609 if (nonblock || !port->ops->tx_blocking)
610 ret = port->ops->tx(port, skb);
611 else
612 ret = port->ops->tx_blocking(port, skb);
613
614out_unlock:
615 mutex_unlock(lock: &port->ops_lock);
616
617 return ret;
618}
619
620static bool is_read_blocked(struct wwan_port *port)
621{
622 return skb_queue_empty(list: &port->rxq) && port->ops;
623}
624
625static bool is_write_blocked(struct wwan_port *port)
626{
627 return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
628}
629
630static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
631{
632 if (!is_read_blocked(port))
633 return 0;
634
635 if (nonblock)
636 return -EAGAIN;
637
638 if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
639 return -ERESTARTSYS;
640
641 return 0;
642}
643
644static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
645{
646 if (!is_write_blocked(port))
647 return 0;
648
649 if (nonblock)
650 return -EAGAIN;
651
652 if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
653 return -ERESTARTSYS;
654
655 return 0;
656}
657
658static int wwan_port_fops_open(struct inode *inode, struct file *file)
659{
660 struct wwan_port *port;
661 int err = 0;
662
663 port = wwan_port_get_by_minor(minor: iminor(inode));
664 if (IS_ERR(ptr: port))
665 return PTR_ERR(ptr: port);
666
667 file->private_data = port;
668 stream_open(inode, filp: file);
669
670 err = wwan_port_op_start(port);
671 if (err)
672 put_device(dev: &port->dev);
673
674 return err;
675}
676
677static int wwan_port_fops_release(struct inode *inode, struct file *filp)
678{
679 struct wwan_port *port = filp->private_data;
680
681 wwan_port_op_stop(port);
682 put_device(dev: &port->dev);
683
684 return 0;
685}
686
687static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
688 size_t count, loff_t *ppos)
689{
690 struct wwan_port *port = filp->private_data;
691 struct sk_buff *skb;
692 size_t copied;
693 int ret;
694
695 ret = wwan_wait_rx(port, nonblock: !!(filp->f_flags & O_NONBLOCK));
696 if (ret)
697 return ret;
698
699 skb = skb_dequeue(list: &port->rxq);
700 if (!skb)
701 return -EIO;
702
703 copied = min_t(size_t, count, skb->len);
704 if (copy_to_user(to: buf, from: skb->data, n: copied)) {
705 kfree_skb(skb);
706 return -EFAULT;
707 }
708 skb_pull(skb, len: copied);
709
710 /* skb is not fully consumed, keep it in the queue */
711 if (skb->len)
712 skb_queue_head(list: &port->rxq, newsk: skb);
713 else
714 consume_skb(skb);
715
716 return copied;
717}
718
719static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
720 size_t count, loff_t *offp)
721{
722 struct sk_buff *skb, *head = NULL, *tail = NULL;
723 struct wwan_port *port = filp->private_data;
724 size_t frag_len, remain = count;
725 int ret;
726
727 ret = wwan_wait_tx(port, nonblock: !!(filp->f_flags & O_NONBLOCK));
728 if (ret)
729 return ret;
730
731 do {
732 frag_len = min(remain, port->frag_len);
733 skb = alloc_skb(size: frag_len + port->headroom_len, GFP_KERNEL);
734 if (!skb) {
735 ret = -ENOMEM;
736 goto freeskb;
737 }
738 skb_reserve(skb, len: port->headroom_len);
739
740 if (!head) {
741 head = skb;
742 } else if (!tail) {
743 skb_shinfo(head)->frag_list = skb;
744 tail = skb;
745 } else {
746 tail->next = skb;
747 tail = skb;
748 }
749
750 if (copy_from_user(to: skb_put(skb, len: frag_len), from: buf + count - remain, n: frag_len)) {
751 ret = -EFAULT;
752 goto freeskb;
753 }
754
755 if (skb != head) {
756 head->data_len += skb->len;
757 head->len += skb->len;
758 head->truesize += skb->truesize;
759 }
760 } while (remain -= frag_len);
761
762 ret = wwan_port_op_tx(port, skb: head, nonblock: !!(filp->f_flags & O_NONBLOCK));
763 if (!ret)
764 return count;
765
766freeskb:
767 kfree_skb(skb: head);
768 return ret;
769}
770
771static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
772{
773 struct wwan_port *port = filp->private_data;
774 __poll_t mask = 0;
775
776 poll_wait(filp, wait_address: &port->waitqueue, p: wait);
777
778 mutex_lock(&port->ops_lock);
779 if (port->ops && port->ops->tx_poll)
780 mask |= port->ops->tx_poll(port, filp, wait);
781 else if (!is_write_blocked(port))
782 mask |= EPOLLOUT | EPOLLWRNORM;
783 if (!is_read_blocked(port))
784 mask |= EPOLLIN | EPOLLRDNORM;
785 if (!port->ops)
786 mask |= EPOLLHUP | EPOLLERR;
787 mutex_unlock(lock: &port->ops_lock);
788
789 return mask;
790}
791
792/* Implements minimalistic stub terminal IOCTLs support */
793static long wwan_port_fops_at_ioctl(struct wwan_port *port, unsigned int cmd,
794 unsigned long arg)
795{
796 int ret = 0;
797
798 mutex_lock(&port->data_lock);
799
800 switch (cmd) {
801 case TCFLSH:
802 break;
803
804 case TCGETS:
805 if (copy_to_user(to: (void __user *)arg, from: &port->at_data.termios,
806 n: sizeof(struct termios)))
807 ret = -EFAULT;
808 break;
809
810 case TCSETS:
811 case TCSETSW:
812 case TCSETSF:
813 if (copy_from_user(to: &port->at_data.termios, from: (void __user *)arg,
814 n: sizeof(struct termios)))
815 ret = -EFAULT;
816 break;
817
818#ifdef TCGETS2
819 case TCGETS2:
820 if (copy_to_user(to: (void __user *)arg, from: &port->at_data.termios,
821 n: sizeof(struct termios2)))
822 ret = -EFAULT;
823 break;
824
825 case TCSETS2:
826 case TCSETSW2:
827 case TCSETSF2:
828 if (copy_from_user(to: &port->at_data.termios, from: (void __user *)arg,
829 n: sizeof(struct termios2)))
830 ret = -EFAULT;
831 break;
832#endif
833
834 case TIOCMGET:
835 ret = put_user(port->at_data.mdmbits, (int __user *)arg);
836 break;
837
838 case TIOCMSET:
839 case TIOCMBIC:
840 case TIOCMBIS: {
841 int mdmbits;
842
843 if (copy_from_user(to: &mdmbits, from: (int __user *)arg, n: sizeof(int))) {
844 ret = -EFAULT;
845 break;
846 }
847 if (cmd == TIOCMBIC)
848 port->at_data.mdmbits &= ~mdmbits;
849 else if (cmd == TIOCMBIS)
850 port->at_data.mdmbits |= mdmbits;
851 else
852 port->at_data.mdmbits = mdmbits;
853 break;
854 }
855
856 default:
857 ret = -ENOIOCTLCMD;
858 }
859
860 mutex_unlock(lock: &port->data_lock);
861
862 return ret;
863}
864
865static long wwan_port_fops_ioctl(struct file *filp, unsigned int cmd,
866 unsigned long arg)
867{
868 struct wwan_port *port = filp->private_data;
869 int res;
870
871 if (port->type == WWAN_PORT_AT) { /* AT port specific IOCTLs */
872 res = wwan_port_fops_at_ioctl(port, cmd, arg);
873 if (res != -ENOIOCTLCMD)
874 return res;
875 }
876
877 switch (cmd) {
878 case TIOCINQ: { /* aka SIOCINQ aka FIONREAD */
879 unsigned long flags;
880 struct sk_buff *skb;
881 int amount = 0;
882
883 spin_lock_irqsave(&port->rxq.lock, flags);
884 skb_queue_walk(&port->rxq, skb)
885 amount += skb->len;
886 spin_unlock_irqrestore(lock: &port->rxq.lock, flags);
887
888 return put_user(amount, (int __user *)arg);
889 }
890
891 default:
892 return -ENOIOCTLCMD;
893 }
894}
895
/* Character device operations backing every WWAN port chardev */
static const struct file_operations wwan_port_fops = {
	.owner = THIS_MODULE,
	.open = wwan_port_fops_open,
	.release = wwan_port_fops_release,
	.read = wwan_port_fops_read,
	.write = wwan_port_fops_write,
	.poll = wwan_port_fops_poll,
	.unlocked_ioctl = wwan_port_fops_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ptr_ioctl,
#endif
	.llseek = noop_llseek,
};
909
910static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
911 struct netlink_ext_ack *extack)
912{
913 if (!data)
914 return -EINVAL;
915
916 if (!tb[IFLA_PARENT_DEV_NAME])
917 return -EINVAL;
918
919 if (!data[IFLA_WWAN_LINK_ID])
920 return -EINVAL;
921
922 return 0;
923}
924
/* Device type tagged on WWAN netdevs (used to recognize them on teardown) */
static const struct device_type wwan_type = { .name = "wwan" };
926
927static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
928 const char *ifname,
929 unsigned char name_assign_type,
930 unsigned int num_tx_queues,
931 unsigned int num_rx_queues)
932{
933 const char *devname = nla_data(nla: tb[IFLA_PARENT_DEV_NAME]);
934 struct wwan_device *wwandev = wwan_dev_get_by_name(name: devname);
935 struct net_device *dev;
936 unsigned int priv_size;
937
938 if (IS_ERR(ptr: wwandev))
939 return ERR_CAST(ptr: wwandev);
940
941 /* only supported if ops were registered (not just ports) */
942 if (!wwandev->ops) {
943 dev = ERR_PTR(error: -EOPNOTSUPP);
944 goto out;
945 }
946
947 priv_size = sizeof(struct wwan_netdev_priv) + wwandev->ops->priv_size;
948 dev = alloc_netdev_mqs(sizeof_priv: priv_size, name: ifname, name_assign_type,
949 setup: wwandev->ops->setup, txqs: num_tx_queues, rxqs: num_rx_queues);
950
951 if (dev) {
952 SET_NETDEV_DEV(dev, &wwandev->dev);
953 SET_NETDEV_DEVTYPE(dev, &wwan_type);
954 }
955
956out:
957 /* release the reference */
958 put_device(dev: &wwandev->dev);
959 return dev;
960}
961
962static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
963 struct nlattr *tb[], struct nlattr *data[],
964 struct netlink_ext_ack *extack)
965{
966 struct wwan_device *wwandev = wwan_dev_get_by_parent(parent: dev->dev.parent);
967 u32 link_id = nla_get_u32(nla: data[IFLA_WWAN_LINK_ID]);
968 struct wwan_netdev_priv *priv = netdev_priv(dev);
969 int ret;
970
971 if (IS_ERR(ptr: wwandev))
972 return PTR_ERR(ptr: wwandev);
973
974 /* shouldn't have a netdev (left) with us as parent so WARN */
975 if (WARN_ON(!wwandev->ops)) {
976 ret = -EOPNOTSUPP;
977 goto out;
978 }
979
980 priv->link_id = link_id;
981 if (wwandev->ops->newlink)
982 ret = wwandev->ops->newlink(wwandev->ops_ctxt, dev,
983 link_id, extack);
984 else
985 ret = register_netdevice(dev);
986
987out:
988 /* release the reference */
989 put_device(dev: &wwandev->dev);
990 return ret;
991}
992
993static void wwan_rtnl_dellink(struct net_device *dev, struct list_head *head)
994{
995 struct wwan_device *wwandev = wwan_dev_get_by_parent(parent: dev->dev.parent);
996
997 if (IS_ERR(ptr: wwandev))
998 return;
999
1000 /* shouldn't have a netdev (left) with us as parent so WARN */
1001 if (WARN_ON(!wwandev->ops))
1002 goto out;
1003
1004 if (wwandev->ops->dellink)
1005 wwandev->ops->dellink(wwandev->ops_ctxt, dev, head);
1006 else
1007 unregister_netdevice_queue(dev, head);
1008
1009out:
1010 /* release the reference */
1011 put_device(dev: &wwandev->dev);
1012}
1013
1014static size_t wwan_rtnl_get_size(const struct net_device *dev)
1015{
1016 return
1017 nla_total_size(payload: 4) + /* IFLA_WWAN_LINK_ID */
1018 0;
1019}
1020
1021static int wwan_rtnl_fill_info(struct sk_buff *skb,
1022 const struct net_device *dev)
1023{
1024 struct wwan_netdev_priv *priv = netdev_priv(dev);
1025
1026 if (nla_put_u32(skb, attrtype: IFLA_WWAN_LINK_ID, value: priv->link_id))
1027 goto nla_put_failure;
1028
1029 return 0;
1030
1031nla_put_failure:
1032 return -EMSGSIZE;
1033}
1034
/* Netlink policy for IFLA_WWAN_* attributes */
static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
	[IFLA_WWAN_LINK_ID] = { .type = NLA_U32 },
};

/* rtnetlink ops implementing "ip link add ... type wwan" */
static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
	.kind = "wwan",
	.maxtype = __IFLA_WWAN_MAX,
	.alloc = wwan_rtnl_alloc,
	.validate = wwan_rtnl_validate,
	.newlink = wwan_rtnl_newlink,
	.dellink = wwan_rtnl_dellink,
	.get_size = wwan_rtnl_get_size,
	.fill_info = wwan_rtnl_fill_info,
	.policy = wwan_rtnl_policy,
};
1050
1051static void wwan_create_default_link(struct wwan_device *wwandev,
1052 u32 def_link_id)
1053{
1054 struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1];
1055 struct nlattr *data[IFLA_WWAN_MAX + 1];
1056 struct net_device *dev;
1057 struct nlmsghdr *nlh;
1058 struct sk_buff *msg;
1059
1060 /* Forge attributes required to create a WWAN netdev. We first
1061 * build a netlink message and then parse it. This looks
1062 * odd, but such approach is less error prone.
1063 */
1064 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1065 if (WARN_ON(!msg))
1066 return;
1067 nlh = nlmsg_put(skb: msg, portid: 0, seq: 0, RTM_NEWLINK, payload: 0, flags: 0);
1068 if (WARN_ON(!nlh))
1069 goto free_attrs;
1070
1071 if (nla_put_string(skb: msg, attrtype: IFLA_PARENT_DEV_NAME, str: dev_name(dev: &wwandev->dev)))
1072 goto free_attrs;
1073 tb[IFLA_LINKINFO] = nla_nest_start(skb: msg, IFLA_LINKINFO);
1074 if (!tb[IFLA_LINKINFO])
1075 goto free_attrs;
1076 linkinfo[IFLA_INFO_DATA] = nla_nest_start(skb: msg, attrtype: IFLA_INFO_DATA);
1077 if (!linkinfo[IFLA_INFO_DATA])
1078 goto free_attrs;
1079 if (nla_put_u32(skb: msg, attrtype: IFLA_WWAN_LINK_ID, value: def_link_id))
1080 goto free_attrs;
1081 nla_nest_end(skb: msg, start: linkinfo[IFLA_INFO_DATA]);
1082 nla_nest_end(skb: msg, start: tb[IFLA_LINKINFO]);
1083
1084 nlmsg_end(skb: msg, nlh);
1085
1086 /* The next three parsing calls can not fail */
1087 nlmsg_parse_deprecated(nlh, hdrlen: 0, tb, IFLA_MAX, NULL, NULL);
1088 nla_parse_nested_deprecated(tb: linkinfo, IFLA_INFO_MAX, nla: tb[IFLA_LINKINFO],
1089 NULL, NULL);
1090 nla_parse_nested_deprecated(tb: data, IFLA_WWAN_MAX,
1091 nla: linkinfo[IFLA_INFO_DATA], NULL, NULL);
1092
1093 rtnl_lock();
1094
1095 dev = rtnl_create_link(net: &init_net, ifname: "wwan%d", NET_NAME_ENUM,
1096 ops: &wwan_rtnl_link_ops, tb, NULL);
1097 if (WARN_ON(IS_ERR(dev)))
1098 goto unlock;
1099
1100 if (WARN_ON(wwan_rtnl_newlink(&init_net, dev, tb, data, NULL))) {
1101 free_netdev(dev);
1102 goto unlock;
1103 }
1104
1105 rtnl_configure_link(dev, NULL, portid: 0, NULL); /* Link initialized, notify new link */
1106
1107unlock:
1108 rtnl_unlock();
1109
1110free_attrs:
1111 nlmsg_free(skb: msg);
1112}
1113
1114/**
1115 * wwan_register_ops - register WWAN device ops
1116 * @parent: Device to use as parent and shared by all WWAN ports and
1117 * created netdevs
1118 * @ops: operations to register
1119 * @ctxt: context to pass to operations
1120 * @def_link_id: id of the default link that will be automatically created by
1121 * the WWAN core for the WWAN device. The default link will not be created
1122 * if the passed value is WWAN_NO_DEFAULT_LINK.
1123 *
1124 * Returns: 0 on success, a negative error code on failure
1125 */
1126int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
1127 void *ctxt, u32 def_link_id)
1128{
1129 struct wwan_device *wwandev;
1130
1131 if (WARN_ON(!parent || !ops || !ops->setup))
1132 return -EINVAL;
1133
1134 wwandev = wwan_create_dev(parent);
1135 if (IS_ERR(ptr: wwandev))
1136 return PTR_ERR(ptr: wwandev);
1137
1138 if (WARN_ON(wwandev->ops)) {
1139 wwan_remove_dev(wwandev);
1140 return -EBUSY;
1141 }
1142
1143 wwandev->ops = ops;
1144 wwandev->ops_ctxt = ctxt;
1145
1146 /* NB: we do not abort ops registration in case of default link
1147 * creation failure. Link ops is the management interface, while the
1148 * default link creation is a service option. And we should not prevent
1149 * a user from manually creating a link latter if service option failed
1150 * now.
1151 */
1152 if (def_link_id != WWAN_NO_DEFAULT_LINK)
1153 wwan_create_default_link(wwandev, def_link_id);
1154
1155 return 0;
1156}
1157EXPORT_SYMBOL_GPL(wwan_register_ops);
1158
1159/* Enqueue child netdev deletion */
1160static int wwan_child_dellink(struct device *dev, void *data)
1161{
1162 struct list_head *kill_list = data;
1163
1164 if (dev->type == &wwan_type)
1165 wwan_rtnl_dellink(to_net_dev(dev), head: kill_list);
1166
1167 return 0;
1168}
1169
1170/**
1171 * wwan_unregister_ops - remove WWAN device ops
1172 * @parent: Device to use as parent and shared by all WWAN ports and
1173 * created netdevs
1174 */
1175void wwan_unregister_ops(struct device *parent)
1176{
1177 struct wwan_device *wwandev = wwan_dev_get_by_parent(parent);
1178 LIST_HEAD(kill_list);
1179
1180 if (WARN_ON(IS_ERR(wwandev)))
1181 return;
1182 if (WARN_ON(!wwandev->ops)) {
1183 put_device(dev: &wwandev->dev);
1184 return;
1185 }
1186
1187 /* put the reference obtained by wwan_dev_get_by_parent(),
1188 * we should still have one (that the owner is giving back
1189 * now) due to the ops being assigned.
1190 */
1191 put_device(dev: &wwandev->dev);
1192
1193 rtnl_lock(); /* Prevent concurrent netdev(s) creation/destroying */
1194
1195 /* Remove all child netdev(s), using batch removing */
1196 device_for_each_child(dev: &wwandev->dev, data: &kill_list,
1197 fn: wwan_child_dellink);
1198 unregister_netdevice_many(head: &kill_list);
1199
1200 wwandev->ops = NULL; /* Finally remove ops */
1201
1202 rtnl_unlock();
1203
1204 wwandev->ops_ctxt = NULL;
1205 wwan_remove_dev(wwandev);
1206}
1207EXPORT_SYMBOL_GPL(wwan_unregister_ops);
1208
1209static int __init wwan_init(void)
1210{
1211 int err;
1212
1213 err = rtnl_link_register(ops: &wwan_rtnl_link_ops);
1214 if (err)
1215 return err;
1216
1217 err = class_register(class: &wwan_class);
1218 if (err)
1219 goto unregister;
1220
1221 /* chrdev used for wwan ports */
1222 wwan_major = __register_chrdev(major: 0, baseminor: 0, WWAN_MAX_MINORS, name: "wwan_port",
1223 fops: &wwan_port_fops);
1224 if (wwan_major < 0) {
1225 err = wwan_major;
1226 goto destroy;
1227 }
1228
1229#ifdef CONFIG_WWAN_DEBUGFS
1230 wwan_debugfs_dir = debugfs_create_dir(name: "wwan", NULL);
1231#endif
1232
1233 return 0;
1234
1235destroy:
1236 class_unregister(class: &wwan_class);
1237unregister:
1238 rtnl_link_unregister(ops: &wwan_rtnl_link_ops);
1239 return err;
1240}
1241
1242static void __exit wwan_exit(void)
1243{
1244 debugfs_remove_recursive(dentry: wwan_debugfs_dir);
1245 __unregister_chrdev(major: wwan_major, baseminor: 0, WWAN_MAX_MINORS, name: "wwan_port");
1246 rtnl_link_unregister(ops: &wwan_rtnl_link_ops);
1247 class_unregister(class: &wwan_class);
1248}
1249
1250module_init(wwan_init);
1251module_exit(wwan_exit);
1252
1253MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
1254MODULE_DESCRIPTION("WWAN core");
1255MODULE_LICENSE("GPL v2");
1256

/* Source: linux/drivers/net/wwan/wwan_core.c */