// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 */

#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>
#include <net/caif/cfcnfg.h>
#include <linux/err.h>
#include <linux/debugfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sjur Brendeland");
MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
MODULE_ALIAS_LDISC(N_CAIF);

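/*
 * Queue watermarks and state bits used below: caif_xmit() asks the CAIF
 * stack to stop sending (flow off) once more than SEND_QUEUE_HIGH frames
 * are queued, and handle_tx() re-enables flow once the queue has drained
 * to SEND_QUEUE_LOW. CAIF_SENDING and CAIF_FLOW_OFF_SENT are bit numbers
 * in ser_device::state, manipulated with the atomic bit operations.
 */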
#define SEND_QUEUE_LOW 10
#define SEND_QUEUE_HIGH 100
#define CAIF_SENDING 1 /* Bit 1 = 0x02 */
#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
#define MAX_WRITE_CHUNK 4096
#define ON 1
#define OFF 0
#define CAIF_MAX_MTU 4096

static DEFINE_SPINLOCK(ser_lock);
static LIST_HEAD(ser_list);
static LIST_HEAD(ser_release_list);

static bool ser_loop;
module_param(ser_loop, bool, 0444);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");

static bool ser_use_stx = true;
module_param(ser_use_stx, bool, 0444);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");

static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, 0444);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");

static int ser_write_chunk = MAX_WRITE_CHUNK;
module_param(ser_write_chunk, int, 0444);
MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");

static struct dentry *debugfsdir;

static int caif_net_open(struct net_device *dev);
static int caif_net_close(struct net_device *dev);

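/*
 * Per-tty state: one ser_device is allocated as the netdev private area
 * in ldisc_open() and ties together the tty, the CAIF network device,
 * the TX queue and (optionally) the debugfs instrumentation.
 */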
struct ser_device {
	struct caif_dev_common common;
	struct list_head node;
	struct net_device *dev;
	struct sk_buff_head head;
	struct tty_struct *tty;
	bool tx_started;
	unsigned long state;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_tty_dir;
	struct debugfs_blob_wrapper tx_blob;
	struct debugfs_blob_wrapper rx_blob;
	u8 rx_data[128];
	u8 tx_data[128];
	u8 tty_status;
#endif
};

static void caifdev_setup(struct net_device *dev);
static void ldisc_tx_wakeup(struct tty_struct *tty);
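
/*
 * With CONFIG_DEBUG_FS set, a directory per tty is created under
 * "caif_serial" exposing the last TX/RX frame (truncated to 128 bytes),
 * the driver state bits and a snapshot of the tty flow-control status.
 * Without CONFIG_DEBUG_FS these helpers compile away to empty stubs.
 */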
#ifdef CONFIG_DEBUG_FS
static inline void update_tty_status(struct ser_device *ser)
{
	ser->tty_status =
		ser->tty->flow.stopped << 5 |
		ser->tty->flow.tco_stopped << 3 |
		ser->tty->ctrl.packet << 2;
}

static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
	ser->debugfs_tty_dir = debugfs_create_dir(tty->name, debugfsdir);

	debugfs_create_blob("last_tx_msg", 0400, ser->debugfs_tty_dir,
			    &ser->tx_blob);

	debugfs_create_blob("last_rx_msg", 0400, ser->debugfs_tty_dir,
			    &ser->rx_blob);

	debugfs_create_xul("ser_state", 0400, ser->debugfs_tty_dir,
			   &ser->state);

	debugfs_create_x8("tty_status", 0400, ser->debugfs_tty_dir,
			  &ser->tty_status);

	ser->tx_blob.data = ser->tx_data;
	ser->tx_blob.size = 0;
	ser->rx_blob.data = ser->rx_data;
	ser->rx_blob.size = 0;
}

static inline void debugfs_deinit(struct ser_device *ser)
{
	debugfs_remove_recursive(ser->debugfs_tty_dir);
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
	if (size > sizeof(ser->rx_data))
		size = sizeof(ser->rx_data);
	memcpy(ser->rx_data, data, size);
	ser->rx_blob.data = ser->rx_data;
	ser->rx_blob.size = size;
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
	if (size > sizeof(ser->tx_data))
		size = sizeof(ser->tx_data);
	memcpy(ser->tx_data, data, size);
	ser->tx_blob.data = ser->tx_data;
	ser->tx_blob.size = size;
}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
}

static inline void debugfs_deinit(struct ser_device *ser)
{
}

static inline void update_tty_status(struct ser_device *ser)
{
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
}

#endif

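/*
 * Receive path: called by the tty layer with raw bytes read from the
 * UART. The bytes are copied into a fresh skb and pushed to the CAIF
 * stack via netif_rx(); framing (STX/FCS) is left to the CAIF stack,
 * configured through the use_stx/use_fcs flags set in caifdev_setup().
 */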
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
			  const u8 *flags, size_t count)
{
	struct sk_buff *skb = NULL;
	struct ser_device *ser;
	int ret;

	ser = tty->disc_data;

	/*
	 * NOTE: flags may contain information about break or overrun.
	 * This is not yet handled.
	 */

	/*
	 * Workaround for garbage at start of transmission,
	 * only enable if STX handling is not enabled.
	 */
	if (!ser->common.use_stx && !ser->tx_started) {
		dev_info(&ser->dev->dev,
			 "Bytes received before initial transmission - "
			 "bytes discarded.\n");
		return;
	}

	BUG_ON(ser->dev == NULL);

	/* Get a suitable caif packet and copy in data. */
	skb = netdev_alloc_skb(ser->dev, count + 1);
	if (skb == NULL)
		return;
	skb_put_data(skb, data, count);

	skb->protocol = htons(ETH_P_CAIF);
	skb_reset_mac_header(skb);
	debugfs_rx(ser, data, count);
	/* Push received packet up the stack. */
	ret = netif_rx(skb);
	if (!ret) {
		ser->dev->stats.rx_packets++;
		ser->dev->stats.rx_bytes += count;
	} else
		++ser->dev->stats.rx_dropped;
	update_tty_status(ser);
}

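/*
 * Transmit worker: drains the TX queue towards the tty (or loops data
 * straight back through ldisc_receive() when ser_loop is set). The
 * CAIF_SENDING bit keeps the function single-entry, since it can be
 * called both from caif_xmit() and from the tty write wakeup.
 */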
static int handle_tx(struct ser_device *ser)
{
	struct tty_struct *tty;
	struct sk_buff *skb;
	int tty_wr, len, room;

	tty = ser->tty;
	ser->tx_started = true;

	/* Enter critical section */
	if (test_and_set_bit(CAIF_SENDING, &ser->state))
		return 0;

	/* skb_peek is safe because handle_tx is called after skb_queue_tail */
	while ((skb = skb_peek(&ser->head)) != NULL) {

		/* Make sure you don't write too much */
		len = skb->len;
		room = tty_write_room(tty);
		if (!room)
			break;
		if (room > ser_write_chunk)
			room = ser_write_chunk;
		if (len > room)
			len = room;

		/* Write to tty or loopback */
		if (!ser_loop) {
			tty_wr = tty->ops->write(tty, skb->data, len);
			update_tty_status(ser);
		} else {
			tty_wr = len;
			ldisc_receive(tty, skb->data, NULL, len);
		}
		ser->dev->stats.tx_packets++;
		ser->dev->stats.tx_bytes += tty_wr;

		/* Error on TTY ?! */
		if (tty_wr < 0)
			goto error;
		/* Reduce buffer written, and discard if empty */
		skb_pull(skb, tty_wr);
		if (skb->len == 0) {
			struct sk_buff *tmp = skb_dequeue(&ser->head);
			WARN_ON(tmp != skb);
			dev_consume_skb_any(skb);
		}
	}
	/* Resume flow (flow on) once the queue has drained to the low watermark */
	if (ser->head.qlen <= SEND_QUEUE_LOW &&
	    test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
	    ser->common.flowctrl != NULL)
		ser->common.flowctrl(ser->dev, ON);
	clear_bit(CAIF_SENDING, &ser->state);
	return 0;
error:
	clear_bit(CAIF_SENDING, &ser->state);
	return tty_wr;
}

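/*
 * ndo_start_xmit: queue the frame and kick the transmit worker. Flow
 * from the CAIF stack is turned off once the queue exceeds the high
 * watermark; handle_tx() turns it back on when the queue drains.
 */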
static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ser_device *ser;

	ser = netdev_priv(dev);

	/* Send flow off once, on high water mark */
	if (ser->head.qlen > SEND_QUEUE_HIGH &&
	    !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
	    ser->common.flowctrl != NULL)
		ser->common.flowctrl(ser->dev, OFF);

	skb_queue_tail(&ser->head, skb);
	return handle_tx(ser);
}

static void ldisc_tx_wakeup(struct tty_struct *tty)
{
	struct ser_device *ser;

	ser = tty->disc_data;
	BUG_ON(ser == NULL);
	WARN_ON(ser->tty != tty);
	handle_tx(ser);
}

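/*
 * Deferred release: devices queued on ser_release_list by ldisc_close()
 * are closed, unregistered and have their debugfs entries removed under
 * the rtnl lock. Runs from the work item and is also called directly
 * from ldisc_open() and module exit.
 */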
static void ser_release(struct work_struct *work)
{
	struct list_head list;
	struct ser_device *ser, *tmp;

	spin_lock(&ser_lock);
	list_replace_init(&ser_release_list, &list);
	spin_unlock(&ser_lock);

	if (!list_empty(&list)) {
		rtnl_lock();
		list_for_each_entry_safe(ser, tmp, &list, node) {
			dev_close(ser->dev);
			unregister_netdevice(ser->dev);
			debugfs_deinit(ser);
		}
		rtnl_unlock();
	}
}

static DECLARE_WORK(ser_release_work, ser_release);

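/*
 * Attaching the N_CAIF line discipline to a tty creates a CAIF network
 * device named "cf<tty name>" bound to that tty. The device queue is
 * left stopped until the interface is brought up, see caif_net_open().
 */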
static int ldisc_open(struct tty_struct *tty)
{
	struct ser_device *ser;
	struct net_device *dev;
	char name[64];
	int result;

	/* No write no play */
	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;
	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
		return -EPERM;

	/* release devices to avoid name collision */
	ser_release(NULL);

	result = snprintf(name, sizeof(name), "cf%s", tty->name);
	if (result >= IFNAMSIZ)
		return -EINVAL;
	dev = alloc_netdev(sizeof(*ser), name, NET_NAME_UNKNOWN,
			   caifdev_setup);
	if (!dev)
		return -ENOMEM;

	ser = netdev_priv(dev);
	ser->tty = tty_kref_get(tty);
	ser->dev = dev;
	debugfs_init(ser, tty);
	tty->receive_room = N_TTY_BUF_SIZE;
	tty->disc_data = ser;
	set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	rtnl_lock();
	result = register_netdevice(dev);
	if (result) {
		tty_kref_put(tty);
		rtnl_unlock();
		free_netdev(dev);
		return -ENODEV;
	}

	spin_lock(&ser_lock);
	list_add(&ser->node, &ser_list);
	spin_unlock(&ser_lock);
	rtnl_unlock();
	netif_stop_queue(dev);
	update_tty_status(ser);
	return 0;
}

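/*
 * Detaching the line discipline drops the tty reference and only queues
 * the device on ser_release_list; the actual dev_close()/unregister runs
 * later from the ser_release() work item.
 */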
static void ldisc_close(struct tty_struct *tty)
{
	struct ser_device *ser = tty->disc_data;

	tty_kref_put(ser->tty);

	spin_lock(&ser_lock);
	list_move(&ser->node, &ser_release_list);
	spin_unlock(&ser_lock);
	schedule_work(&ser_release_work);
}

/* The line discipline structure. */
static struct tty_ldisc_ops caif_ldisc = {
	.owner = THIS_MODULE,
	.num = N_CAIF,
	.name = "n_caif",
	.open = ldisc_open,
	.close = ldisc_close,
	.receive_buf = ldisc_receive,
	.write_wakeup = ldisc_tx_wakeup
};

static const struct net_device_ops netdev_ops = {
	.ndo_open = caif_net_open,
	.ndo_stop = caif_net_close,
	.ndo_start_xmit = caif_xmit
};

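/*
 * Net device setup: a point-to-point, ARP-less CAIF interface without a
 * qdisc (IFF_NO_QUEUE); STX and FCS usage is taken from the module
 * parameters.
 */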
static void caifdev_setup(struct net_device *dev)
{
	struct ser_device *serdev = netdev_priv(dev);

	dev->features = 0;
	dev->netdev_ops = &netdev_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CAIF_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->needs_free_netdev = true;
	skb_queue_head_init(&serdev->head);
	serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
	serdev->common.use_frag = true;
	serdev->common.use_stx = ser_use_stx;
	serdev->common.use_fcs = ser_use_fcs;
	serdev->dev = dev;
}

static int caif_net_open(struct net_device *dev)
{
	netif_wake_queue(dev);
	return 0;
}

static int caif_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

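/*
 * Module init/exit: register the line discipline and create the debugfs
 * root directory; on unload, move all remaining devices to the release
 * list and tear them down before unregistering the line discipline.
 */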
static int __init caif_ser_init(void)
{
	int ret;

	ret = tty_register_ldisc(&caif_ldisc);
	if (ret < 0)
		pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF, ret);

	debugfsdir = debugfs_create_dir("caif_serial", NULL);
	return ret;
}

static void __exit caif_ser_exit(void)
{
	spin_lock(&ser_lock);
	list_splice(&ser_list, &ser_release_list);
	spin_unlock(&ser_lock);
	ser_release(NULL);
	cancel_work_sync(&ser_release_work);
	tty_unregister_ldisc(&caif_ldisc);
	debugfs_remove_recursive(debugfsdir);
}

module_init(caif_ser_init);
module_exit(caif_ser_exit);
