1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * PPP async serial channel driver for Linux. |
4 | * |
5 | * Copyright 1999 Paul Mackerras. |
6 | * |
7 | * This driver provides the encapsulation and framing for sending |
8 | * and receiving PPP frames over async serial lines. It relies on |
9 | * the generic PPP layer to give it frames to send and to process |
10 | * received frames. It implements the PPP line discipline. |
11 | * |
12 | * Part of the code in this driver was inspired by the old async-only |
13 | * PPP driver, written by Michael Callahan and Al Longyear, and |
14 | * subsequently hacked by Paul Mackerras. |
15 | */ |
16 | |
17 | #include <linux/module.h> |
18 | #include <linux/kernel.h> |
19 | #include <linux/skbuff.h> |
20 | #include <linux/tty.h> |
21 | #include <linux/netdevice.h> |
22 | #include <linux/poll.h> |
23 | #include <linux/crc-ccitt.h> |
24 | #include <linux/ppp_defs.h> |
25 | #include <linux/ppp-ioctl.h> |
26 | #include <linux/ppp_channel.h> |
27 | #include <linux/spinlock.h> |
28 | #include <linux/init.h> |
29 | #include <linux/interrupt.h> |
30 | #include <linux/jiffies.h> |
31 | #include <linux/slab.h> |
32 | #include <asm/unaligned.h> |
33 | #include <linux/uaccess.h> |
34 | #include <asm/string.h> |
35 | |
36 | #define PPP_VERSION "2.4.2" |
37 | |
38 | #define OBUFSIZE 4096 |
39 | |
40 | /* Structure for storing local state. */ |
41 | struct asyncppp { |
42 | struct tty_struct *tty; |
43 | unsigned int flags; |
44 | unsigned int state; |
45 | unsigned int rbits; |
46 | int mru; |
47 | spinlock_t xmit_lock; |
48 | spinlock_t recv_lock; |
49 | unsigned long xmit_flags; |
50 | u32 xaccm[8]; |
51 | u32 raccm; |
52 | unsigned int bytes_sent; |
53 | unsigned int bytes_rcvd; |
54 | |
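/* transmit-side state: the packet currently being encoded and the output buffer cursors */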
55 | struct sk_buff *tpkt; |
56 | int tpkt_pos; |
57 | u16 tfcs; |
58 | unsigned char *optr; |
59 | unsigned char *olim; |
60 | unsigned long last_xmit; |
61 | |
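/* receive-side state: the frame being assembled and the queue drained by the tasklet */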
62 | struct sk_buff *rpkt; |
63 | int lcp_fcs; |
64 | struct sk_buff_head rqueue; |
65 | |
66 | struct tasklet_struct tsk; |
67 | |
68 | refcount_t refcnt; |
69 | struct completion dead; |
70 | struct ppp_channel chan; /* interface to generic ppp layer */ |
71 | unsigned char obuf[OBUFSIZE]; |
72 | }; |
73 | |
74 | /* Bit numbers in xmit_flags */ |
75 | #define XMIT_WAKEUP 0 |
76 | #define XMIT_FULL 1 |
77 | #define XMIT_BUSY 2 |
78 | |
79 | /* State bits */ |
80 | #define SC_TOSS 1 |
81 | #define SC_ESCAPE 2 |
82 | #define SC_PREV_ERROR 4 |
83 | |
84 | /* Bits in rbits */ |
85 | #define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP) |
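/*
 * rbits accumulates the SC_RCV_* bits observed on received characters
 * (parity and 8th-bit usage); PPPIOCGFLAGS reports them to pppd along
 * with the channel flags so it can detect a line that is not 8-bit clean.
 */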
86 | |
87 | static int flag_time = HZ; |
88 | module_param(flag_time, int, 0); |
89 | MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)"); |
90 | MODULE_LICENSE("GPL"); |
91 | MODULE_ALIAS_LDISC(N_PPP); |
92 | |
93 | /* |
94 | * Prototypes. |
95 | */ |
96 | static int ppp_async_encode(struct asyncppp *ap); |
97 | static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb); |
98 | static int ppp_async_push(struct asyncppp *ap); |
99 | static void ppp_async_flush_output(struct asyncppp *ap); |
100 | static void ppp_async_input(struct asyncppp *ap, const u8 *buf, |
101 | const u8 *flags, int count); |
102 | static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, |
103 | unsigned long arg); |
104 | static void ppp_async_process(struct tasklet_struct *t); |
105 | |
106 | static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, |
107 | int len, int inbound); |
108 | |
109 | static const struct ppp_channel_ops async_ops = { |
110 | .start_xmit = ppp_async_send, |
111 | .ioctl = ppp_async_ioctl, |
112 | }; |
113 | |
114 | /* |
115 | * Routines implementing the PPP line discipline. |
116 | */ |
117 | |
118 | /* |
119 | * We have a potential race on dereferencing tty->disc_data, |
120 | * because the tty layer provides no locking at all - thus one |
121 | * cpu could be running ppp_asynctty_receive while another |
122 | * calls ppp_asynctty_close, which zeroes tty->disc_data and |
123 | * frees the memory that ppp_asynctty_receive is using. The best |
124 | * way to fix this is to use a rwlock in the tty struct, but for now |
125 | * we use a single global rwlock for all ttys in ppp line discipline. |
126 | * |
127 | * FIXME: this is no longer true. The _close path for the ldisc is |
128 | * now guaranteed to be sane. |
129 | */ |
130 | static DEFINE_RWLOCK(disc_data_lock); |
131 | |
132 | static struct asyncppp *ap_get(struct tty_struct *tty) |
133 | { |
134 | struct asyncppp *ap; |
135 | |
136 | read_lock(&disc_data_lock); |
137 | ap = tty->disc_data; |
138 | if (ap != NULL) |
139 | refcount_inc(&ap->refcnt); |
140 | read_unlock(&disc_data_lock); |
141 | return ap; |
142 | } |
143 | |
144 | static void ap_put(struct asyncppp *ap) |
145 | { |
146 | if (refcount_dec_and_test(&ap->refcnt)) |
147 | complete(&ap->dead); |
148 | } |
149 | |
150 | /* |
151 | * Called when a tty is put into PPP line discipline. Called in process |
152 | * context. |
153 | */ |
154 | static int |
155 | ppp_asynctty_open(struct tty_struct *tty) |
156 | { |
157 | struct asyncppp *ap; |
158 | int err; |
159 | int speed; |
160 | |
161 | if (tty->ops->write == NULL) |
162 | return -EOPNOTSUPP; |
163 | |
164 | err = -ENOMEM; |
165 | ap = kzalloc(sizeof(*ap), GFP_KERNEL); |
166 | if (!ap) |
167 | goto out; |
168 | |
169 | /* initialize the asyncppp structure */ |
170 | ap->tty = tty; |
171 | ap->mru = PPP_MRU; |
172 | spin_lock_init(&ap->xmit_lock); |
173 | spin_lock_init(&ap->recv_lock); |
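/*
 * Default async character maps.  xaccm is a 256-bit set (8 x 32-bit
 * words): word 0 = ~0 escapes every control character 0x00-0x1f on
 * transmit, and the two bits in word 3 cover 0x7d and 0x7e, which
 * must always be escaped.  raccm is a 32-bit map of control
 * characters that, if received unescaped, are dropped as line noise.
 */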
174 | ap->xaccm[0] = ~0U; |
175 | ap->xaccm[3] = 0x60000000U; |
176 | ap->raccm = ~0U; |
177 | ap->optr = ap->obuf; |
178 | ap->olim = ap->obuf; |
179 | ap->lcp_fcs = -1; |
180 | |
181 | skb_queue_head_init(&ap->rqueue); |
182 | tasklet_setup(&ap->tsk, ppp_async_process); |
183 | |
184 | refcount_set(&ap->refcnt, 1); |
185 | init_completion(&ap->dead); |
186 | |
187 | ap->chan.private = ap; |
188 | ap->chan.ops = &async_ops; |
189 | ap->chan.mtu = PPP_MRU; |
190 | speed = tty_get_baud_rate(tty); |
191 | ap->chan.speed = speed; |
192 | err = ppp_register_channel(&ap->chan); |
193 | if (err) |
194 | goto out_free; |
195 | |
196 | tty->disc_data = ap; |
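/* advertise a large receive_room so the tty core does not throttle input on our account */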
197 | tty->receive_room = 65536; |
198 | return 0; |
199 | |
200 | out_free: |
201 | kfree(ap); |
202 | out: |
203 | return err; |
204 | } |
205 | |
206 | /* |
207 | * Called when the tty is put into another line discipline |
208 | * or it hangs up. We have to wait for any cpu currently |
209 | * executing in any of the other ppp_asynctty_* routines to |
210 | * finish before we can call ppp_unregister_channel and free |
211 | * the asyncppp struct. This routine must be called from |
212 | * process context, not interrupt or softirq context. |
213 | */ |
214 | static void |
215 | ppp_asynctty_close(struct tty_struct *tty) |
216 | { |
217 | struct asyncppp *ap; |
218 | |
219 | write_lock_irq(&disc_data_lock); |
220 | ap = tty->disc_data; |
221 | tty->disc_data = NULL; |
222 | write_unlock_irq(&disc_data_lock); |
223 | if (!ap) |
224 | return; |
225 | |
226 | /* |
227 | * We have now ensured that nobody can start using ap from now |
228 | * on, but we have to wait for all existing users to finish. |
229 | * Note that ppp_unregister_channel ensures that no calls to |
230 | * our channel ops (i.e. ppp_async_send/ioctl) are in progress |
231 | * by the time it returns. |
232 | */ |
233 | if (!refcount_dec_and_test(&ap->refcnt)) |
234 | wait_for_completion(&ap->dead); |
235 | tasklet_kill(&ap->tsk); |
236 | |
237 | ppp_unregister_channel(&ap->chan); |
238 | kfree_skb(ap->rpkt); |
239 | skb_queue_purge(&ap->rqueue); |
240 | kfree_skb(ap->tpkt); |
241 | kfree(ap); |
242 | } |
243 | |
244 | /* |
245 | * Called on tty hangup in process context. |
246 | * |
247 | * Wait for I/O to the driver to complete and unregister the PPP channel. |
248 | * This is already done by the close routine, so just call that. |
249 | */ |
250 | static void ppp_asynctty_hangup(struct tty_struct *tty) |
251 | { |
252 | ppp_asynctty_close(tty); |
253 | } |
254 | |
255 | /* |
256 | * Read does nothing - no data is ever available this way. |
257 | * Pppd reads and writes packets via /dev/ppp instead. |
258 | */ |
259 | static ssize_t |
260 | ppp_asynctty_read(struct tty_struct *tty, struct file *file, u8 *buf, |
261 | size_t count, void **cookie, unsigned long offset) |
262 | { |
263 | return -EAGAIN; |
264 | } |
265 | |
266 | /* |
267 | * Write on the tty does nothing, the packets all come in |
268 | * from the ppp generic stuff. |
269 | */ |
270 | static ssize_t |
271 | ppp_asynctty_write(struct tty_struct *tty, struct file *file, const u8 *buf, |
272 | size_t count) |
273 | { |
274 | return -EAGAIN; |
275 | } |
276 | |
277 | /* |
278 | * Called in process context only. May be re-entered by multiple |
279 | * ioctl calling threads. |
280 | */ |
281 | |
282 | static int |
283 | ppp_asynctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) |
284 | { |
285 | struct asyncppp *ap = ap_get(tty); |
286 | int err, val; |
287 | int __user *p = (int __user *)arg; |
288 | |
289 | if (!ap) |
290 | return -ENXIO; |
291 | err = -EFAULT; |
292 | switch (cmd) { |
293 | case PPPIOCGCHAN: |
294 | err = -EFAULT; |
295 | if (put_user(ppp_channel_index(&ap->chan), p)) |
296 | break; |
297 | err = 0; |
298 | break; |
299 | |
300 | case PPPIOCGUNIT: |
301 | err = -EFAULT; |
302 | if (put_user(ppp_unit_number(&ap->chan), p)) |
303 | break; |
304 | err = 0; |
305 | break; |
306 | |
307 | case TCFLSH: |
308 | /* flush our buffers and the serial port's buffer */ |
309 | if (arg == TCIOFLUSH || arg == TCOFLUSH) |
310 | ppp_async_flush_output(ap); |
311 | err = n_tty_ioctl_helper(tty, cmd, arg); |
312 | break; |
313 | |
314 | case FIONREAD: |
315 | val = 0; |
316 | if (put_user(val, p)) |
317 | break; |
318 | err = 0; |
319 | break; |
320 | |
321 | default: |
322 | /* Try the various mode ioctls */ |
323 | err = tty_mode_ioctl(tty, cmd, arg); |
324 | } |
325 | |
326 | ap_put(ap); |
327 | return err; |
328 | } |
329 | |
330 | /* May sleep, don't call from interrupt level or with interrupts disabled */ |
331 | static void |
332 | ppp_asynctty_receive(struct tty_struct *tty, const u8 *buf, const u8 *cflags, |
333 | size_t count) |
334 | { |
335 | struct asyncppp *ap = ap_get(tty); |
336 | unsigned long flags; |
337 | |
338 | if (!ap) |
339 | return; |
340 | spin_lock_irqsave(&ap->recv_lock, flags); |
341 | ppp_async_input(ap, buf, cflags, count); |
342 | spin_unlock_irqrestore(&ap->recv_lock, flags); |
343 | if (!skb_queue_empty(&ap->rqueue)) |
344 | tasklet_schedule(&ap->tsk); |
345 | ap_put(ap); |
346 | tty_unthrottle(tty); |
347 | } |
348 | |
349 | static void |
350 | ppp_asynctty_wakeup(struct tty_struct *tty) |
351 | { |
352 | struct asyncppp *ap = ap_get(tty); |
353 | |
354 | clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
355 | if (!ap) |
356 | return; |
357 | set_bit(XMIT_WAKEUP, &ap->xmit_flags); |
358 | tasklet_schedule(&ap->tsk); |
359 | ap_put(ap); |
360 | } |
361 | |
362 | |
363 | static struct tty_ldisc_ops ppp_ldisc = { |
364 | .owner = THIS_MODULE, |
365 | .num = N_PPP, |
366 | .name = "ppp", |
367 | .open = ppp_asynctty_open, |
368 | .close = ppp_asynctty_close, |
369 | .hangup = ppp_asynctty_hangup, |
370 | .read = ppp_asynctty_read, |
371 | .write = ppp_asynctty_write, |
372 | .ioctl = ppp_asynctty_ioctl, |
373 | .receive_buf = ppp_asynctty_receive, |
374 | .write_wakeup = ppp_asynctty_wakeup, |
375 | }; |
376 | |
377 | static int __init |
378 | ppp_async_init(void) |
379 | { |
380 | int err; |
381 | |
382 | err = tty_register_ldisc(&ppp_ldisc); |
383 | if (err != 0) |
384 | printk(KERN_ERR "PPP_async: error %d registering line disc.\n", |
385 | err); |
386 | return err; |
387 | } |
388 | |
389 | /* |
390 | * The following routines provide the PPP channel interface. |
391 | */ |
392 | static int |
393 | ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg) |
394 | { |
395 | struct asyncppp *ap = chan->private; |
396 | void __user *argp = (void __user *)arg; |
397 | int __user *p = argp; |
398 | int err, val; |
399 | u32 accm[8]; |
400 | |
401 | err = -EFAULT; |
402 | switch (cmd) { |
403 | case PPPIOCGFLAGS: |
404 | val = ap->flags | ap->rbits; |
405 | if (put_user(val, p)) |
406 | break; |
407 | err = 0; |
408 | break; |
409 | case PPPIOCSFLAGS: |
410 | if (get_user(val, p)) |
411 | break; |
412 | ap->flags = val & ~SC_RCV_BITS; |
413 | spin_lock_irq(&ap->recv_lock); |
414 | ap->rbits = val & SC_RCV_BITS; |
415 | spin_unlock_irq(&ap->recv_lock); |
416 | err = 0; |
417 | break; |
418 | |
419 | case PPPIOCGASYNCMAP: |
420 | if (put_user(ap->xaccm[0], (u32 __user *)argp)) |
421 | break; |
422 | err = 0; |
423 | break; |
424 | case PPPIOCSASYNCMAP: |
425 | if (get_user(ap->xaccm[0], (u32 __user *)argp)) |
426 | break; |
427 | err = 0; |
428 | break; |
429 | |
430 | case PPPIOCGRASYNCMAP: |
431 | if (put_user(ap->raccm, (u32 __user *)argp)) |
432 | break; |
433 | err = 0; |
434 | break; |
435 | case PPPIOCSRASYNCMAP: |
436 | if (get_user(ap->raccm, (u32 __user *)argp)) |
437 | break; |
438 | err = 0; |
439 | break; |
440 | |
441 | case PPPIOCGXASYNCMAP: |
442 | if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm))) |
443 | break; |
444 | err = 0; |
445 | break; |
446 | case PPPIOCSXASYNCMAP: |
447 | if (copy_from_user(accm, argp, sizeof(accm))) |
448 | break; |
449 | accm[2] &= ~0x40000000U; /* can't escape 0x5e */ |
450 | accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */ |
451 | memcpy(ap->xaccm, accm, sizeof(ap->xaccm)); |
452 | err = 0; |
453 | break; |
454 | |
455 | case PPPIOCGMRU: |
456 | if (put_user(ap->mru, p)) |
457 | break; |
458 | err = 0; |
459 | break; |
460 | case PPPIOCSMRU: |
461 | if (get_user(val, p)) |
462 | break; |
463 | if (val < PPP_MRU) |
464 | val = PPP_MRU; |
465 | ap->mru = val; |
466 | err = 0; |
467 | break; |
468 | |
469 | default: |
470 | err = -ENOTTY; |
471 | } |
472 | |
473 | return err; |
474 | } |
475 | |
476 | /* |
477 | * This is called at softirq level to deliver received packets |
478 | * to the ppp_generic code, and to tell the ppp_generic code |
479 | * if we can accept more output now. |
480 | */ |
481 | static void ppp_async_process(struct tasklet_struct *t) |
482 | { |
483 | struct asyncppp *ap = from_tasklet(ap, t, tsk); |
484 | struct sk_buff *skb; |
485 | |
486 | /* process received packets */ |
487 | while ((skb = skb_dequeue(&ap->rqueue)) != NULL) { |
488 | if (skb->cb[0]) |
489 | ppp_input_error(&ap->chan, 0); |
490 | ppp_input(&ap->chan, skb); |
491 | } |
492 | |
493 | /* try to push more stuff out */ |
494 | if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap)) |
495 | ppp_output_wakeup(&ap->chan); |
496 | } |
497 | |
498 | /* |
499 | * Procedures for encapsulation and framing. |
500 | */ |
501 | |
502 | /* |
503 | * Procedure to encode the data for async serial transmission. |
504 | * Does octet stuffing (escaping), puts the address/control bytes |
505 | * on if A/C compression is disabled, and does protocol compression. |
506 | * Assumes ap->tpkt != 0 on entry. |
507 | * Returns 1 if we finished the current frame, 0 otherwise. |
508 | */ |
509 | |
510 | #define PUT_BYTE(ap, buf, c, islcp) do { \ |
511 | if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\ |
512 | *buf++ = PPP_ESCAPE; \ |
513 | *buf++ = c ^ PPP_TRANS; \ |
514 | } else \ |
515 | *buf++ = c; \ |
516 | } while (0) |
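/*
 * Example: with the default maps, a flag byte 0x7e appearing in the
 * payload is transmitted as the two bytes 0x7d 0x5e (PPP_ESCAPE
 * followed by the byte XORed with PPP_TRANS, 0x20).  For LCP frames
 * every character below 0x20 is escaped regardless of the asyncmap.
 */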
517 | |
518 | static int |
519 | ppp_async_encode(struct asyncppp *ap) |
520 | { |
521 | int fcs, i, count, c, proto; |
522 | unsigned char *buf, *buflim; |
523 | unsigned char *data; |
524 | int islcp; |
525 | |
526 | buf = ap->obuf; |
527 | ap->olim = buf; |
528 | ap->optr = buf; |
529 | i = ap->tpkt_pos; |
530 | data = ap->tpkt->data; |
531 | count = ap->tpkt->len; |
532 | fcs = ap->tfcs; |
533 | proto = get_unaligned_be16(data); |
534 | |
535 | /* |
536 | * LCP packets with code values between 1 (configure-request) |
537 | * and 7 (code-reject) must be sent as though no options |
538 | * had been negotiated. |
539 | */ |
540 | islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7; |
541 | |
542 | if (i == 0) { |
543 | if (islcp) |
544 | async_lcp_peek(ap, data, count, 0); |
545 | |
546 | /* |
547 | * Start of a new packet - insert the leading FLAG |
548 | * character if necessary. |
549 | */ |
550 | if (islcp || flag_time == 0 || |
551 | time_after_eq(jiffies, ap->last_xmit + flag_time)) |
552 | *buf++ = PPP_FLAG; |
553 | ap->last_xmit = jiffies; |
554 | fcs = PPP_INITFCS; |
555 | |
556 | /* |
557 | * Put in the address/control bytes if necessary |
558 | */ |
559 | if ((ap->flags & SC_COMP_AC) == 0 || islcp) { |
560 | PUT_BYTE(ap, buf, 0xff, islcp); |
561 | fcs = PPP_FCS(fcs, 0xff); |
562 | PUT_BYTE(ap, buf, 0x03, islcp); |
563 | fcs = PPP_FCS(fcs, 0x03); |
564 | } |
565 | } |
566 | |
567 | /* |
568 | * Once we put in the last byte, we need to put in the FCS |
569 | * and closing flag, so make sure there are at least 7 bytes |
570 | * of free space in the output buffer. |
571 | */ |
572 | buflim = ap->obuf + OBUFSIZE - 6; |
573 | while (i < count && buf < buflim) { |
574 | c = data[i++]; |
575 | if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT)) |
576 | continue; /* compress protocol field */ |
577 | fcs = PPP_FCS(fcs, c); |
578 | PUT_BYTE(ap, buf, c, islcp); |
579 | } |
580 | |
581 | if (i < count) { |
582 | /* |
583 | * Remember where we are up to in this packet. |
584 | */ |
585 | ap->olim = buf; |
586 | ap->tpkt_pos = i; |
587 | ap->tfcs = fcs; |
588 | return 0; |
589 | } |
590 | |
591 | /* |
592 | * We have finished the packet. Add the FCS and flag. |
593 | */ |
594 | fcs = ~fcs; |
595 | c = fcs & 0xff; |
596 | PUT_BYTE(ap, buf, c, islcp); |
597 | c = (fcs >> 8) & 0xff; |
598 | PUT_BYTE(ap, buf, c, islcp); |
599 | *buf++ = PPP_FLAG; |
600 | ap->olim = buf; |
601 | |
602 | consume_skb(ap->tpkt); |
603 | ap->tpkt = NULL; |
604 | return 1; |
605 | } |
606 | |
607 | /* |
608 | * Transmit-side routines. |
609 | */ |
610 | |
611 | /* |
612 | * Send a packet to the peer over an async tty line. |
613 | * Returns 1 iff the packet was accepted. |
614 | * If the packet was not accepted, we will call ppp_output_wakeup |
615 | * at some later time. |
616 | */ |
617 | static int |
618 | ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb) |
619 | { |
620 | struct asyncppp *ap = chan->private; |
621 | |
622 | ppp_async_push(ap); |
623 | |
624 | if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags)) |
625 | return 0; /* already full */ |
626 | ap->tpkt = skb; |
627 | ap->tpkt_pos = 0; |
628 | |
629 | ppp_async_push(ap); |
630 | return 1; |
631 | } |
632 | |
633 | /* |
634 | * Push as much data as possible out to the tty. |
635 | */ |
636 | static int |
637 | ppp_async_push(struct asyncppp *ap) |
638 | { |
639 | int avail, sent, done = 0; |
640 | struct tty_struct *tty = ap->tty; |
641 | int tty_stuffed = 0; |
642 | |
643 | /* |
644 | * We can get called recursively here if the tty write |
645 | * function calls our wakeup function. This can happen |
646 | * for example on a pty with both the master and slave |
647 | * set to PPP line discipline. |
648 | * We use the XMIT_BUSY bit to detect this and get out, |
649 | * leaving the XMIT_WAKEUP bit set to tell the other |
650 | * instance that it may now be able to write more. |
651 | */ |
652 | if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags)) |
653 | return 0; |
654 | spin_lock_bh(&ap->xmit_lock); |
655 | for (;;) { |
656 | if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags)) |
657 | tty_stuffed = 0; |
658 | if (!tty_stuffed && ap->optr < ap->olim) { |
659 | avail = ap->olim - ap->optr; |
660 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
661 | sent = tty->ops->write(tty, ap->optr, avail); |
662 | if (sent < 0) |
663 | goto flush; /* error, e.g. loss of CD */ |
664 | ap->optr += sent; |
665 | if (sent < avail) |
666 | tty_stuffed = 1; |
667 | continue; |
668 | } |
669 | if (ap->optr >= ap->olim && ap->tpkt) { |
670 | if (ppp_async_encode(ap)) { |
671 | /* finished processing ap->tpkt */ |
672 | clear_bit(XMIT_FULL, &ap->xmit_flags); |
673 | done = 1; |
674 | } |
675 | continue; |
676 | } |
677 | /* |
678 | * We haven't made any progress this time around. |
679 | * Clear XMIT_BUSY to let other callers in, but |
680 | * after doing so we have to check if anyone set |
681 | * XMIT_WAKEUP since we last checked it. If they |
682 | * did, we should try again to set XMIT_BUSY and go |
683 | * around again in case XMIT_BUSY was still set when |
684 | * the other caller tried. |
685 | */ |
686 | clear_bit(XMIT_BUSY, &ap->xmit_flags); |
687 | /* any more work to do? if not, exit the loop */ |
688 | if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) || |
689 | (!tty_stuffed && ap->tpkt))) |
690 | break; |
691 | /* more work to do, see if we can do it now */ |
692 | if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags)) |
693 | break; |
694 | } |
695 | spin_unlock_bh(&ap->xmit_lock); |
696 | return done; |
697 | |
698 | flush: |
699 | clear_bit(XMIT_BUSY, &ap->xmit_flags); |
700 | if (ap->tpkt) { |
701 | kfree_skb(ap->tpkt); |
702 | ap->tpkt = NULL; |
703 | clear_bit(XMIT_FULL, &ap->xmit_flags); |
704 | done = 1; |
705 | } |
706 | ap->optr = ap->olim; |
707 | spin_unlock_bh(&ap->xmit_lock); |
708 | return done; |
709 | } |
710 | |
711 | /* |
712 | * Flush output from our internal buffers. |
713 | * Called for the TCFLSH ioctl. Can be entered in parallel |
714 | * but this is covered by the xmit_lock. |
715 | */ |
716 | static void |
717 | ppp_async_flush_output(struct asyncppp *ap) |
718 | { |
719 | int done = 0; |
720 | |
721 | spin_lock_bh(&ap->xmit_lock); |
722 | ap->optr = ap->olim; |
723 | if (ap->tpkt != NULL) { |
724 | kfree_skb(ap->tpkt); |
725 | ap->tpkt = NULL; |
726 | clear_bit(XMIT_FULL, &ap->xmit_flags); |
727 | done = 1; |
728 | } |
729 | spin_unlock_bh(&ap->xmit_lock); |
730 | if (done) |
731 | ppp_output_wakeup(&ap->chan); |
732 | } |
733 | |
734 | /* |
735 | * Receive-side routines. |
736 | */ |
737 | |
738 | /* see how many ordinary chars there are at the start of buf */ |
739 | static inline int |
740 | scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count) |
741 | { |
742 | int i, c; |
743 | |
744 | for (i = 0; i < count; ++i) { |
745 | c = buf[i]; |
746 | if (c == PPP_ESCAPE || c == PPP_FLAG || |
747 | (c < 0x20 && (ap->raccm & (1 << c)) != 0)) |
748 | break; |
749 | } |
750 | return i; |
751 | } |
752 | |
753 | /* called when a flag is seen - do end-of-packet processing */ |
754 | static void |
755 | process_input_packet(struct asyncppp *ap) |
756 | { |
757 | struct sk_buff *skb; |
758 | unsigned char *p; |
759 | unsigned int len, fcs; |
760 | |
761 | skb = ap->rpkt; |
762 | if (ap->state & (SC_TOSS | SC_ESCAPE)) |
763 | goto err; |
764 | |
765 | if (skb == NULL) |
766 | return; /* 0-length packet */ |
767 | |
768 | /* check the FCS */ |
769 | p = skb->data; |
770 | len = skb->len; |
771 | if (len < 3) |
772 | goto err; /* too short */ |
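/*
 * The FCS-16 computed over the whole frame, including the two
 * trailing FCS bytes themselves, equals the constant PPP_GOODFCS
 * (0xf0b8) when the frame arrived intact.
 */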
773 | fcs = PPP_INITFCS; |
774 | for (; len > 0; --len) |
775 | fcs = PPP_FCS(fcs, *p++); |
776 | if (fcs != PPP_GOODFCS) |
777 | goto err; /* bad FCS */ |
778 | skb_trim(skb, skb->len - 2); |
779 | |
780 | /* check for address/control and protocol compression */ |
781 | p = skb->data; |
782 | if (p[0] == PPP_ALLSTATIONS) { |
783 | /* chop off address/control */ |
784 | if (p[1] != PPP_UI || skb->len < 3) |
785 | goto err; |
786 | p = skb_pull(skb, 2); |
787 | } |
788 | |
789 | /* If the protocol field is not compressed, this may be an LCP packet */ |
790 | if (!(p[0] & 0x01)) { |
791 | unsigned int proto; |
792 | |
793 | if (skb->len < 2) |
794 | goto err; |
795 | proto = (p[0] << 8) + p[1]; |
796 | if (proto == PPP_LCP) |
797 | async_lcp_peek(ap, p, skb->len, 1); |
798 | } |
799 | |
800 | /* queue the frame to be processed */ |
801 | skb->cb[0] = ap->state; |
802 | skb_queue_tail(&ap->rqueue, skb); |
803 | ap->rpkt = NULL; |
804 | ap->state = 0; |
805 | return; |
806 | |
807 | err: |
808 | /* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */ |
809 | ap->state = SC_PREV_ERROR; |
810 | if (skb) { |
811 | /* make skb appear as freshly allocated */ |
812 | skb_trim(skb, 0); |
813 | skb_reserve(skb, - skb_headroom(skb)); |
814 | } |
815 | } |
816 | |
817 | /* Called when the tty driver has data for us. Runs in parallel with the |
818 | other ldisc functions but will not be re-entered. */ |
819 | |
820 | static void |
821 | ppp_async_input(struct asyncppp *ap, const u8 *buf, const u8 *flags, int count) |
822 | { |
823 | struct sk_buff *skb; |
824 | int c, i, j, n, s, f; |
825 | unsigned char *sp; |
826 | |
827 | /* update bits used for 8-bit cleanness detection */ |
828 | if (~ap->rbits & SC_RCV_BITS) { |
829 | s = 0; |
830 | for (i = 0; i < count; ++i) { |
831 | c = buf[i]; |
832 | if (flags && flags[i] != 0) |
833 | continue; |
834 | s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0; |
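/*
 * 0x6996 is a 16-entry parity table: bit n is set when the 4-bit
 * value n has odd parity, so folding the byte to 4 bits and
 * indexing into it classifies the character's parity.
 */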
835 | c = ((c >> 4) ^ c) & 0xf; |
836 | s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP; |
837 | } |
838 | ap->rbits |= s; |
839 | } |
840 | |
841 | while (count > 0) { |
842 | /* scan through and see how many chars we can do in bulk */ |
843 | if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE) |
844 | n = 1; |
845 | else |
846 | n = scan_ordinary(ap, buf, count); |
847 | |
848 | f = 0; |
849 | if (flags && (ap->state & SC_TOSS) == 0) { |
850 | /* check the flags to see if any char had an error */ |
851 | for (j = 0; j < n; ++j) |
852 | if ((f = flags[j]) != 0) |
853 | break; |
854 | } |
855 | if (f != 0) { |
856 | /* start tossing */ |
857 | ap->state |= SC_TOSS; |
858 | |
859 | } else if (n > 0 && (ap->state & SC_TOSS) == 0) { |
860 | /* stuff the chars in the skb */ |
861 | skb = ap->rpkt; |
862 | if (!skb) { |
863 | skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2); |
864 | if (!skb) |
865 | goto nomem; |
866 | ap->rpkt = skb; |
867 | } |
868 | if (skb->len == 0) { |
869 | /* Try to get the payload 4-byte aligned. |
870 | * This should match the |
871 | * PPP_ALLSTATIONS/PPP_UI/compressed tests in |
872 | * process_input_packet, but we do not have |
873 | * enough chars here to test buf[1] and buf[2]. |
874 | */ |
875 | if (buf[0] != PPP_ALLSTATIONS) |
876 | skb_reserve(skb, 2 + (buf[0] & 1)); |
877 | } |
878 | if (n > skb_tailroom(skb)) { |
879 | /* packet overflowed MRU */ |
880 | ap->state |= SC_TOSS; |
881 | } else { |
882 | sp = skb_put_data(skb, buf, n); |
883 | if (ap->state & SC_ESCAPE) { |
884 | sp[0] ^= PPP_TRANS; |
885 | ap->state &= ~SC_ESCAPE; |
886 | } |
887 | } |
888 | } |
889 | |
890 | if (n >= count) |
891 | break; |
892 | |
893 | c = buf[n]; |
894 | if (flags != NULL && flags[n] != 0) { |
895 | ap->state |= SC_TOSS; |
896 | } else if (c == PPP_FLAG) { |
897 | process_input_packet(ap); |
898 | } else if (c == PPP_ESCAPE) { |
899 | ap->state |= SC_ESCAPE; |
900 | } else if (I_IXON(ap->tty)) { |
901 | if (c == START_CHAR(ap->tty)) |
902 | start_tty(ap->tty); |
903 | else if (c == STOP_CHAR(ap->tty)) |
904 | stop_tty(ap->tty); |
905 | } |
906 | /* otherwise it's a char in the recv ACCM */ |
907 | ++n; |
908 | |
909 | buf += n; |
910 | if (flags) |
911 | flags += n; |
912 | count -= n; |
913 | } |
914 | return; |
915 | |
916 | nomem: |
917 | printk(KERN_ERR "PPPasync: no memory (input pkt)\n"); |
918 | ap->state |= SC_TOSS; |
919 | } |
920 | |
921 | /* |
922 | * We look at LCP frames going past so that we can notice |
923 | * and react to the LCP configure-ack from the peer. |
924 | * In the situation where the peer has been sent a configure-ack |
925 | * already, LCP is up once it has sent its configure-ack |
926 | * so the immediately following packet can be sent with the |
927 | * configured LCP options. This allows us to process the following |
928 | * packet correctly without pppd needing to respond quickly. |
929 | * |
930 | * We only respond to the received configure-ack if we have just |
931 | * sent a configure-request, and the configure-ack contains the |
932 | * same data (this is checked using a 16-bit crc of the data). |
933 | */ |
934 | #define CONFREQ 1 /* LCP code field values */ |
935 | #define CONFACK 2 |
936 | #define LCP_MRU 1 /* LCP option numbers */ |
937 | #define LCP_ASYNCMAP 2 |
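/*
 * LCP packet layout after the protocol field: code (1 byte),
 * id (1 byte), length (2 bytes, big-endian), then options, each
 * encoded as type (1 byte), length (1 byte, covering the type and
 * length bytes themselves), value.
 */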
938 | |
939 | static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, |
940 | int len, int inbound) |
941 | { |
942 | int dlen, fcs, i, code; |
943 | u32 val; |
944 | |
945 | data += 2; /* skip protocol bytes */ |
946 | len -= 2; |
947 | if (len < 4) /* 4 = code, ID, length */ |
948 | return; |
949 | code = data[0]; |
950 | if (code != CONFACK && code != CONFREQ) |
951 | return; |
952 | dlen = get_unaligned_be16(data + 2); |
953 | if (len < dlen) |
954 | return; /* packet got truncated or length is bogus */ |
955 | |
956 | if (code == (inbound? CONFACK: CONFREQ)) { |
957 | /* |
958 | * sent confreq or received confack: |
959 | * calculate the crc of the data from the ID field on. |
960 | */ |
961 | fcs = PPP_INITFCS; |
962 | for (i = 1; i < dlen; ++i) |
963 | fcs = PPP_FCS(fcs, data[i]); |
964 | |
965 | if (!inbound) { |
966 | /* outbound confreq - remember the crc for later */ |
967 | ap->lcp_fcs = fcs; |
968 | return; |
969 | } |
970 | |
971 | /* received confack, check the crc */ |
972 | fcs ^= ap->lcp_fcs; |
973 | ap->lcp_fcs = -1; |
974 | if (fcs != 0) |
975 | return; |
976 | } else if (inbound) |
977 | return; /* not interested in received confreq */ |
978 | |
979 | /* process the options in the confack */ |
980 | data += 4; |
981 | dlen -= 4; |
982 | /* data[0] is code, data[1] is length */ |
983 | while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { |
984 | switch (data[0]) { |
985 | case LCP_MRU: |
986 | val = get_unaligned_be16(data + 2); |
987 | if (inbound) |
988 | ap->mru = val; |
989 | else |
990 | ap->chan.mtu = val; |
991 | break; |
992 | case LCP_ASYNCMAP: |
993 | val = get_unaligned_be32(data + 2); |
994 | if (inbound) |
995 | ap->raccm = val; |
996 | else |
997 | ap->xaccm[0] = val; |
998 | break; |
999 | } |
1000 | dlen -= data[1]; |
1001 | data += data[1]; |
1002 | } |
1003 | } |
1004 | |
1005 | static void __exit ppp_async_cleanup(void) |
1006 | { |
1007 | tty_unregister_ldisc(&ppp_ldisc); |
1008 | } |
1009 | |
1010 | module_init(ppp_async_init); |
1011 | module_exit(ppp_async_cleanup); |
1012 | |