root/drivers/net/ppp/ppp_synctty.c


DEFINITIONS

This source file includes the following definitions.
  1. ppp_print_buffer
  2. sp_get
  3. sp_put
  4. ppp_sync_open
  5. ppp_sync_close
  6. ppp_sync_hangup
  7. ppp_sync_read
  8. ppp_sync_write
  9. ppp_synctty_ioctl
  10. ppp_sync_poll
  11. ppp_sync_receive
  12. ppp_sync_wakeup
  13. ppp_sync_init
  14. ppp_sync_ioctl
  15. ppp_sync_process
  16. ppp_sync_txmunge
  17. ppp_sync_send
  18. ppp_sync_push
  19. ppp_sync_flush_output
  20. ppp_sync_input
  21. ppp_sync_cleanup

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * PPP synchronous tty channel driver for Linux.
   4  *
   5  * This is a ppp channel driver that can be used with tty device drivers
   6  * that are frame oriented, such as synchronous HDLC devices.
   7  *
   8  * Complete PPP frames without encoding/decoding are exchanged between
   9  * the channel driver and the device driver.
  10  *
  11  * The async map IOCTL codes are implemented to keep the user mode
  12  * applications happy if they call them. Synchronous PPP does not use
  13  * the async maps.
  14  *
  15  * Copyright 1999 Paul Mackerras.
  16  *
  17  * Also touched by the grubby hands of Paul Fulghum paulkf@microgate.com
  18  *
  19  * This driver provides the encapsulation and framing for sending
  20  * and receiving PPP frames over sync serial lines.  It relies on
  21  * the generic PPP layer to give it frames to send and to process
  22  * received frames.  It implements the PPP line discipline.
  23  *
  24  * Part of the code in this driver was inspired by the old async-only
  25  * PPP driver, written by Michael Callahan and Al Longyear, and
  26  * subsequently hacked by Paul Mackerras.
  27  *
  28  * ==FILEVERSION 20040616==
  29  */
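      /*
       * Concretely (illustrative note, not from the original comment): where
       * the async PPP driver adds 0x7e flag bytes, byte stuffing and an FCS,
       * this driver hands each complete frame (e.g. ff 03 c0 21 ... for an
       * LCP packet) to the synchronous tty driver unchanged and expects
       * whole frames back; the synchronous hardware/driver does the HDLC
       * framing.
       */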
  30 
  31 #include <linux/module.h>
  32 #include <linux/kernel.h>
  33 #include <linux/skbuff.h>
  34 #include <linux/tty.h>
  35 #include <linux/netdevice.h>
  36 #include <linux/poll.h>
  37 #include <linux/ppp_defs.h>
  38 #include <linux/ppp-ioctl.h>
  39 #include <linux/ppp_channel.h>
  40 #include <linux/spinlock.h>
  41 #include <linux/completion.h>
  42 #include <linux/init.h>
  43 #include <linux/interrupt.h>
  44 #include <linux/slab.h>
  45 #include <linux/refcount.h>
  46 #include <asm/unaligned.h>
  47 #include <linux/uaccess.h>
  48 
  49 #define PPP_VERSION     "2.4.2"
  50 
  51 /* Structure for storing local state. */
  52 struct syncppp {
  53         struct tty_struct *tty;
  54         unsigned int    flags;
  55         unsigned int    rbits;
  56         int             mru;
  57         spinlock_t      xmit_lock;
  58         spinlock_t      recv_lock;
  59         unsigned long   xmit_flags;
  60         u32             xaccm[8];
  61         u32             raccm;
  62         unsigned int    bytes_sent;
  63         unsigned int    bytes_rcvd;
  64 
  65         struct sk_buff  *tpkt;
  66         unsigned long   last_xmit;
  67 
  68         struct sk_buff_head rqueue;
  69 
  70         struct tasklet_struct tsk;
  71 
  72         refcount_t      refcnt;
  73         struct completion dead_cmp;
  74         struct ppp_channel chan;        /* interface to generic ppp layer */
  75 };
  76 
  77 /* Bit numbers in xmit_flags */
  78 #define XMIT_WAKEUP     0
  79 #define XMIT_FULL       1
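      /*
       * XMIT_WAKEUP is set by ppp_sync_wakeup() when the tty driver can
       * accept more output; XMIT_FULL is set while a transmit packet is
       * held in ap->tpkt, so only one packet is buffered here at a time.
       */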
  80 
  81 /* Bits in rbits */
  82 #define SC_RCV_BITS     (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
  83 
  84 #define PPPSYNC_MAX_RQLEN       32      /* arbitrary */
  85 
  86 /*
  87  * Prototypes.
  88  */
  89 static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
  90 static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
  91 static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
  92                           unsigned long arg);
  93 static void ppp_sync_process(unsigned long arg);
  94 static int ppp_sync_push(struct syncppp *ap);
  95 static void ppp_sync_flush_output(struct syncppp *ap);
  96 static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
  97                            char *flags, int count);
  98 
  99 static const struct ppp_channel_ops sync_ops = {
 100         .start_xmit = ppp_sync_send,
 101         .ioctl      = ppp_sync_ioctl,
 102 };
 103 
 104 /*
 105  * Utility procedure to print a buffer in hex/ascii
 106  */
 107 static void
 108 ppp_print_buffer (const char *name, const __u8 *buf, int count)
 109 {
 110         if (name != NULL)
 111                 printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);
 112 
 113         print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, count);
 114 }
 115 
 116 
 117 /*
 118  * Routines implementing the synchronous PPP line discipline.
 119  */
 120 
 121 /*
 122  * We have a potential race on dereferencing tty->disc_data,
 123  * because the tty layer provides no locking at all - thus one
 124  * cpu could be running ppp_synctty_receive while another
 125  * calls ppp_synctty_close, which zeroes tty->disc_data and
 126  * frees the memory that ppp_synctty_receive is using.  The best
 127  * way to fix this is to use a rwlock in the tty struct, but for now
 128  * we use a single global rwlock for all ttys in ppp line discipline.
 129  *
 130  * FIXME: Fixed in tty_io nowadays.
 131  */
 132 static DEFINE_RWLOCK(disc_data_lock);
 133 
 134 static struct syncppp *sp_get(struct tty_struct *tty)
 135 {
 136         struct syncppp *ap;
 137 
 138         read_lock(&disc_data_lock);
 139         ap = tty->disc_data;
 140         if (ap != NULL)
 141                 refcount_inc(&ap->refcnt);
 142         read_unlock(&disc_data_lock);
 143         return ap;
 144 }
 145 
 146 static void sp_put(struct syncppp *ap)
 147 {
 148         if (refcount_dec_and_test(&ap->refcnt))
 149                 complete(&ap->dead_cmp);
 150 }
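      /*
       * Typical usage of the pair above (illustrative):
       *
       *        struct syncppp *ap = sp_get(tty);
       *        if (ap) {
       *                ... use ap ...
       *                sp_put(ap);
       *        }
       *
       * ppp_sync_close() drops the initial reference and, if other CPUs
       * still hold references, sleeps on dead_cmp until the final sp_put()
       * completes it.
       */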
 151 
 152 /*
 153  * Called when a tty is put into sync-PPP line discipline.
 154  */
 155 static int
 156 ppp_sync_open(struct tty_struct *tty)
 157 {
 158         struct syncppp *ap;
 159         int err;
 160         int speed;
 161 
 162         if (tty->ops->write == NULL)
 163                 return -EOPNOTSUPP;
 164 
 165         ap = kzalloc(sizeof(*ap), GFP_KERNEL);
 166         err = -ENOMEM;
 167         if (!ap)
 168                 goto out;
 169 
 170         /* initialize the syncppp structure */
 171         ap->tty = tty;
 172         ap->mru = PPP_MRU;
 173         spin_lock_init(&ap->xmit_lock);
 174         spin_lock_init(&ap->recv_lock);
 175         ap->xaccm[0] = ~0U;
 176         ap->xaccm[3] = 0x60000000U;
 177         ap->raccm = ~0U;
 178 
 179         skb_queue_head_init(&ap->rqueue);
 180         tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);
 181 
 182         refcount_set(&ap->refcnt, 1);
 183         init_completion(&ap->dead_cmp);
 184 
 185         ap->chan.private = ap;
 186         ap->chan.ops = &sync_ops;
 187         ap->chan.mtu = PPP_MRU;
 188         ap->chan.hdrlen = 2;    /* for A/C bytes */
 189         speed = tty_get_baud_rate(tty);
 190         ap->chan.speed = speed;
 191         err = ppp_register_channel(&ap->chan);
 192         if (err)
 193                 goto out_free;
 194 
 195         tty->disc_data = ap;
 196         tty->receive_room = 65536;
 197         return 0;
 198 
 199  out_free:
 200         kfree(ap);
 201  out:
 202         return err;
 203 }
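      /*
       * Illustrative user-space sketch (not part of this driver; tty_fd is
       * a hypothetical descriptor) of how a pppd-like daemon enters this
       * line discipline, which makes the tty layer call ppp_sync_open():
       *
       *        int disc = N_SYNC_PPP;
       *        if (ioctl(tty_fd, TIOCSETD, &disc) < 0)
       *                perror("TIOCSETD");
       */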
 204 
 205 /*
 206  * Called when the tty is put into another line discipline
 207  * or it hangs up.  We have to wait for any cpu currently
 208  * executing in any of the other ppp_synctty_* routines to
 209  * finish before we can call ppp_unregister_channel and free
 210  * the syncppp struct.  This routine must be called from
 211  * process context, not interrupt or softirq context.
 212  */
 213 static void
 214 ppp_sync_close(struct tty_struct *tty)
 215 {
 216         struct syncppp *ap;
 217 
 218         write_lock_irq(&disc_data_lock);
 219         ap = tty->disc_data;
 220         tty->disc_data = NULL;
 221         write_unlock_irq(&disc_data_lock);
 222         if (!ap)
 223                 return;
 224 
 225         /*
 226          * We have now ensured that nobody can start using ap from now
 227          * on, but we have to wait for all existing users to finish.
 228          * Note that ppp_unregister_channel ensures that no calls to
 229          * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
 230          * by the time it returns.
 231          */
 232         if (!refcount_dec_and_test(&ap->refcnt))
 233                 wait_for_completion(&ap->dead_cmp);
 234         tasklet_kill(&ap->tsk);
 235 
 236         ppp_unregister_channel(&ap->chan);
 237         skb_queue_purge(&ap->rqueue);
 238         kfree_skb(ap->tpkt);
 239         kfree(ap);
 240 }
 241 
 242 /*
 243  * Called on tty hangup in process context.
 244  *
  245  * Wait for I/O to the driver to complete and unregister the PPP channel.
 246  * This is already done by the close routine, so just call that.
 247  */
 248 static int ppp_sync_hangup(struct tty_struct *tty)
 249 {
 250         ppp_sync_close(tty);
 251         return 0;
 252 }
 253 
 254 /*
 255  * Read does nothing - no data is ever available this way.
 256  * Pppd reads and writes packets via /dev/ppp instead.
 257  */
 258 static ssize_t
 259 ppp_sync_read(struct tty_struct *tty, struct file *file,
 260                unsigned char __user *buf, size_t count)
 261 {
 262         return -EAGAIN;
 263 }
 264 
 265 /*
 266  * Write on the tty does nothing, the packets all come in
 267  * from the ppp generic stuff.
 268  */
 269 static ssize_t
 270 ppp_sync_write(struct tty_struct *tty, struct file *file,
 271                 const unsigned char *buf, size_t count)
 272 {
 273         return -EAGAIN;
 274 }
 275 
 276 static int
 277 ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
 278                   unsigned int cmd, unsigned long arg)
 279 {
 280         struct syncppp *ap = sp_get(tty);
 281         int __user *p = (int __user *)arg;
 282         int err, val;
 283 
 284         if (!ap)
 285                 return -ENXIO;
 286         err = -EFAULT;
 287         switch (cmd) {
 288         case PPPIOCGCHAN:
 289                 err = -EFAULT;
 290                 if (put_user(ppp_channel_index(&ap->chan), p))
 291                         break;
 292                 err = 0;
 293                 break;
 294 
 295         case PPPIOCGUNIT:
 296                 err = -EFAULT;
 297                 if (put_user(ppp_unit_number(&ap->chan), p))
 298                         break;
 299                 err = 0;
 300                 break;
 301 
 302         case TCFLSH:
 303                 /* flush our buffers and the serial port's buffer */
 304                 if (arg == TCIOFLUSH || arg == TCOFLUSH)
 305                         ppp_sync_flush_output(ap);
 306                 err = n_tty_ioctl_helper(tty, file, cmd, arg);
 307                 break;
 308 
 309         case FIONREAD:
 310                 val = 0;
 311                 if (put_user(val, p))
 312                         break;
 313                 err = 0;
 314                 break;
 315 
 316         default:
 317                 err = tty_mode_ioctl(tty, file, cmd, arg);
 318                 break;
 319         }
 320 
 321         sp_put(ap);
 322         return err;
 323 }
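      /*
       * Illustrative user-space sketch (assumed, not from this file): once
       * the discipline is attached, a daemon can look up the channel index
       * handled above and bind a /dev/ppp descriptor to that channel:
       *
       *        int chan_idx, ppp_fd;
       *        ioctl(tty_fd, PPPIOCGCHAN, &chan_idx);
       *        ppp_fd = open("/dev/ppp", O_RDWR);
       *        ioctl(ppp_fd, PPPIOCATTCHAN, &chan_idx);
       *
       * PPPIOCATTCHAN is handled by the generic PPP code, not here.
       */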
 324 
 325 /* No kernel lock - fine */
 326 static __poll_t
 327 ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
 328 {
 329         return 0;
 330 }
 331 
 332 /* May sleep, don't call from interrupt level or with interrupts disabled */
 333 static void
 334 ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
 335                   char *cflags, int count)
 336 {
 337         struct syncppp *ap = sp_get(tty);
 338         unsigned long flags;
 339 
 340         if (!ap)
 341                 return;
 342         spin_lock_irqsave(&ap->recv_lock, flags);
 343         ppp_sync_input(ap, buf, cflags, count);
 344         spin_unlock_irqrestore(&ap->recv_lock, flags);
 345         if (!skb_queue_empty(&ap->rqueue))
 346                 tasklet_schedule(&ap->tsk);
 347         sp_put(ap);
 348         tty_unthrottle(tty);
 349 }
 350 
 351 static void
 352 ppp_sync_wakeup(struct tty_struct *tty)
 353 {
 354         struct syncppp *ap = sp_get(tty);
 355 
 356         clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
 357         if (!ap)
 358                 return;
 359         set_bit(XMIT_WAKEUP, &ap->xmit_flags);
 360         tasklet_schedule(&ap->tsk);
 361         sp_put(ap);
 362 }
 363 
 364 
 365 static struct tty_ldisc_ops ppp_sync_ldisc = {
 366         .owner  = THIS_MODULE,
 367         .magic  = TTY_LDISC_MAGIC,
 368         .name   = "pppsync",
 369         .open   = ppp_sync_open,
 370         .close  = ppp_sync_close,
 371         .hangup = ppp_sync_hangup,
 372         .read   = ppp_sync_read,
 373         .write  = ppp_sync_write,
 374         .ioctl  = ppp_synctty_ioctl,
 375         .poll   = ppp_sync_poll,
 376         .receive_buf = ppp_sync_receive,
 377         .write_wakeup = ppp_sync_wakeup,
 378 };
 379 
 380 static int __init
 381 ppp_sync_init(void)
 382 {
 383         int err;
 384 
 385         err = tty_register_ldisc(N_SYNC_PPP, &ppp_sync_ldisc);
 386         if (err != 0)
 387                 printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
 388                        err);
 389         return err;
 390 }
 391 
 392 /*
 393  * The following routines provide the PPP channel interface.
 394  */
 395 static int
 396 ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
 397 {
 398         struct syncppp *ap = chan->private;
 399         int err, val;
 400         u32 accm[8];
 401         void __user *argp = (void __user *)arg;
 402         u32 __user *p = argp;
 403 
 404         err = -EFAULT;
 405         switch (cmd) {
 406         case PPPIOCGFLAGS:
 407                 val = ap->flags | ap->rbits;
 408                 if (put_user(val, (int __user *) argp))
 409                         break;
 410                 err = 0;
 411                 break;
 412         case PPPIOCSFLAGS:
 413                 if (get_user(val, (int __user *) argp))
 414                         break;
 415                 ap->flags = val & ~SC_RCV_BITS;
 416                 spin_lock_irq(&ap->recv_lock);
 417                 ap->rbits = val & SC_RCV_BITS;
 418                 spin_unlock_irq(&ap->recv_lock);
 419                 err = 0;
 420                 break;
 421 
 422         case PPPIOCGASYNCMAP:
 423                 if (put_user(ap->xaccm[0], p))
 424                         break;
 425                 err = 0;
 426                 break;
 427         case PPPIOCSASYNCMAP:
 428                 if (get_user(ap->xaccm[0], p))
 429                         break;
 430                 err = 0;
 431                 break;
 432 
 433         case PPPIOCGRASYNCMAP:
 434                 if (put_user(ap->raccm, p))
 435                         break;
 436                 err = 0;
 437                 break;
 438         case PPPIOCSRASYNCMAP:
 439                 if (get_user(ap->raccm, p))
 440                         break;
 441                 err = 0;
 442                 break;
 443 
 444         case PPPIOCGXASYNCMAP:
 445                 if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
 446                         break;
 447                 err = 0;
 448                 break;
 449         case PPPIOCSXASYNCMAP:
 450                 if (copy_from_user(accm, argp, sizeof(accm)))
 451                         break;
 452                 accm[2] &= ~0x40000000U;        /* can't escape 0x5e */
 453                 accm[3] |= 0x60000000U;         /* must escape 0x7d, 0x7e */
 454                 memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
 455                 err = 0;
 456                 break;
 457 
 458         case PPPIOCGMRU:
 459                 if (put_user(ap->mru, (int __user *) argp))
 460                         break;
 461                 err = 0;
 462                 break;
 463         case PPPIOCSMRU:
 464                 if (get_user(val, (int __user *) argp))
 465                         break;
 466                 if (val < PPP_MRU)
 467                         val = PPP_MRU;
 468                 ap->mru = val;
 469                 err = 0;
 470                 break;
 471 
 472         default:
 473                 err = -ENOTTY;
 474         }
 475         return err;
 476 }
 477 
 478 /*
 479  * This is called at softirq level to deliver received packets
 480  * to the ppp_generic code, and to tell the ppp_generic code
 481  * if we can accept more output now.
 482  */
 483 static void ppp_sync_process(unsigned long arg)
 484 {
 485         struct syncppp *ap = (struct syncppp *) arg;
 486         struct sk_buff *skb;
 487 
 488         /* process received packets */
 489         while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
 490                 if (skb->len == 0) {
 491                         /* zero length buffers indicate error */
 492                         ppp_input_error(&ap->chan, 0);
 493                         kfree_skb(skb);
 494                 }
 495                 else
 496                         ppp_input(&ap->chan, skb);
 497         }
 498 
 499         /* try to push more stuff out */
 500         if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap))
 501                 ppp_output_wakeup(&ap->chan);
 502 }
 503 
 504 /*
 505  * Procedures for encapsulation and framing.
 506  */
 507 
 508 static struct sk_buff*
 509 ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
 510 {
 511         int proto;
 512         unsigned char *data;
 513         int islcp;
 514 
 515         data  = skb->data;
 516         proto = get_unaligned_be16(data);
 517 
 518         /* LCP packets with codes between 1 (configure-request)
 519          * and 7 (code-reject) must be sent as though no options
 520          * have been negotiated.
 521          */
 522         islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
 523 
 524         /* compress protocol field if option enabled */
 525         if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp)
 526                 skb_pull(skb,1);
 527 
 528         /* prepend address/control fields if necessary */
 529         if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
 530                 if (skb_headroom(skb) < 2) {
 531                         struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
 532                         if (npkt == NULL) {
 533                                 kfree_skb(skb);
 534                                 return NULL;
 535                         }
 536                         skb_reserve(npkt,2);
 537                         skb_copy_from_linear_data(skb,
 538                                       skb_put(npkt, skb->len), skb->len);
 539                         consume_skb(skb);
 540                         skb = npkt;
 541                 }
 542                 skb_push(skb,2);
 543                 skb->data[0] = PPP_ALLSTATIONS;
 544                 skb->data[1] = PPP_UI;
 545         }
 546 
 547         ap->last_xmit = jiffies;
 548 
 549         if (skb && ap->flags & SC_LOG_OUTPKT)
 550                 ppp_print_buffer ("send buffer", skb->data, skb->len);
 551 
 552         return skb;
 553 }
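      /*
       * Worked example of the munging above: ppp_generic hands us an IPv4
       * packet starting 00 21 45 ... (protocol PPP_IP).  With SC_COMP_PROT
       * set the leading 00 is dropped; with SC_COMP_AC clear (or for LCP
       * codes 1-7) ff 03 (PPP_ALLSTATIONS, PPP_UI) is prepended, so the
       * frame written to the tty starts ff 03 21 45 ...
       */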
 554 
 555 /*
 556  * Transmit-side routines.
 557  */
 558 
 559 /*
  560  * Send a packet to the peer over a sync tty line.
 561  * Returns 1 iff the packet was accepted.
 562  * If the packet was not accepted, we will call ppp_output_wakeup
 563  * at some later time.
 564  */
 565 static int
 566 ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
 567 {
 568         struct syncppp *ap = chan->private;
 569 
 570         ppp_sync_push(ap);
 571 
 572         if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
 573                 return 0;       /* already full */
 574         skb = ppp_sync_txmunge(ap, skb);
 575         if (skb != NULL)
 576                 ap->tpkt = skb;
 577         else
 578                 clear_bit(XMIT_FULL, &ap->xmit_flags);
 579 
 580         ppp_sync_push(ap);
 581         return 1;
 582 }
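      /*
       * Note on the contract above: when XMIT_FULL is already set we return
       * 0 without consuming the skb; the generic PPP layer keeps the packet
       * queued and retries after ppp_output_wakeup() is called from
       * ppp_sync_process() once the held packet has been pushed out.
       */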
 583 
 584 /*
 585  * Push as much data as possible out to the tty.
 586  */
 587 static int
 588 ppp_sync_push(struct syncppp *ap)
 589 {
 590         int sent, done = 0;
 591         struct tty_struct *tty = ap->tty;
 592         int tty_stuffed = 0;
 593 
 594         if (!spin_trylock_bh(&ap->xmit_lock))
 595                 return 0;
 596         for (;;) {
 597                 if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
 598                         tty_stuffed = 0;
 599                 if (!tty_stuffed && ap->tpkt) {
 600                         set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
 601                         sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len);
 602                         if (sent < 0)
 603                                 goto flush;     /* error, e.g. loss of CD */
 604                         if (sent < ap->tpkt->len) {
 605                                 tty_stuffed = 1;
 606                         } else {
 607                                 consume_skb(ap->tpkt);
 608                                 ap->tpkt = NULL;
 609                                 clear_bit(XMIT_FULL, &ap->xmit_flags);
 610                                 done = 1;
 611                         }
 612                         continue;
 613                 }
 614                 /* haven't made any progress */
 615                 spin_unlock_bh(&ap->xmit_lock);
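                      /*
                       * Re-check after dropping the lock: ppp_sync_wakeup()
                       * may have set XMIT_WAKEUP while we held it.  If there
                       * is still work to do, try to retake the lock; if
                       * another CPU now holds it, it will do the work.
                       */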
 616                 if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
 617                       (!tty_stuffed && ap->tpkt)))
 618                         break;
 619                 if (!spin_trylock_bh(&ap->xmit_lock))
 620                         break;
 621         }
 622         return done;
 623 
 624 flush:
 625         if (ap->tpkt) {
 626                 kfree_skb(ap->tpkt);
 627                 ap->tpkt = NULL;
 628                 clear_bit(XMIT_FULL, &ap->xmit_flags);
 629                 done = 1;
 630         }
 631         spin_unlock_bh(&ap->xmit_lock);
 632         return done;
 633 }
 634 
 635 /*
 636  * Flush output from our internal buffers.
 637  * Called for the TCFLSH ioctl.
 638  */
 639 static void
 640 ppp_sync_flush_output(struct syncppp *ap)
 641 {
 642         int done = 0;
 643 
 644         spin_lock_bh(&ap->xmit_lock);
 645         if (ap->tpkt != NULL) {
 646                 kfree_skb(ap->tpkt);
 647                 ap->tpkt = NULL;
 648                 clear_bit(XMIT_FULL, &ap->xmit_flags);
 649                 done = 1;
 650         }
 651         spin_unlock_bh(&ap->xmit_lock);
 652         if (done)
 653                 ppp_output_wakeup(&ap->chan);
 654 }
 655 
 656 /*
 657  * Receive-side routines.
 658  */
 659 
 660 /* called when the tty driver has data for us.
 661  *
 662  * Data is frame oriented: each call to ppp_sync_input is considered
 663  * a whole frame. If the 1st flag byte is non-zero then the whole
 664  * frame is considered to be in error and is tossed.
 665  */
 666 static void
 667 ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
 668                 char *flags, int count)
 669 {
 670         struct sk_buff *skb;
 671         unsigned char *p;
 672 
 673         if (count == 0)
 674                 return;
 675 
 676         if (ap->flags & SC_LOG_INPKT)
 677                 ppp_print_buffer ("receive buffer", buf, count);
 678 
 679         /* stuff the chars in the skb */
 680         skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
 681         if (!skb) {
 682                 printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
 683                 goto err;
 684         }
 685         /* Try to get the payload 4-byte aligned */
 686         if (buf[0] != PPP_ALLSTATIONS)
 687                 skb_reserve(skb, 2 + (buf[0] & 1));
 688 
 689         if (flags && *flags) {
 690                 /* error flag set, ignore frame */
 691                 goto err;
 692         } else if (count > skb_tailroom(skb)) {
 693                 /* packet overflowed MRU */
 694                 goto err;
 695         }
 696 
 697         skb_put_data(skb, buf, count);
 698 
 699         /* strip address/control field if present */
 700         p = skb->data;
 701         if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
 702                 /* chop off address/control */
 703                 if (skb->len < 3)
 704                         goto err;
 705                 p = skb_pull(skb, 2);
 706         }
 707 
 708         /* PPP packet length should be >= 2 bytes when protocol field is not
 709          * compressed.
 710          */
 711         if (!(p[0] & 0x01) && skb->len < 2)
 712                 goto err;
 713 
 714         /* queue the frame to be processed */
 715         skb_queue_tail(&ap->rqueue, skb);
 716         return;
 717 
 718 err:
 719         /* queue zero length packet as error indication */
 720         if (skb || (skb = dev_alloc_skb(0))) {
 721                 skb_trim(skb, 0);
 722                 skb_queue_tail(&ap->rqueue, skb);
 723         }
 724 }
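      /*
       * Worked example of the input path above: the sync tty driver
       * delivers a complete frame such as ff 03 c0 21 01 01 00 04 (an LCP
       * configure-request) in one call.  The ff 03 address/control pair is
       * stripped and c0 21 01 01 00 04 is queued on rqueue for
       * ppp_sync_process() to pass to ppp_input().  A frame arriving with a
       * non-zero flag byte ends up as a zero-length skb instead, which
       * ppp_sync_process() reports through ppp_input_error().
       */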
 725 
 726 static void __exit
 727 ppp_sync_cleanup(void)
 728 {
 729         if (tty_unregister_ldisc(N_SYNC_PPP) != 0)
 730                 printk(KERN_ERR "failed to unregister Sync PPP line discipline\n");
 731 }
 732 
 733 module_init(ppp_sync_init);
 734 module_exit(ppp_sync_cleanup);
 735 MODULE_LICENSE("GPL");
 736 MODULE_ALIAS_LDISC(N_SYNC_PPP);
