/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h. Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20030706==
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>

#define PPP_VERSION	"2.4.2"

/*
 * Network protocols we support.
 */
#define NP_IP	0		/* Internet Protocol V4 */
#define NP_IPV6	1		/* Internet Protocol V6 */
#define NP_IPX	2		/* IPX protocol */
#define NP_AT	3		/* Appletalk protocol */
#define NUM_NP	4		/* Number of NPs. */

#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
#define MIN_FRAG_SIZE	64

/*
 * An instance of /dev/ppp can be associated with either a ppp
 * interface unit or a ppp channel. In both cases, file->private_data
 * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE=1, CHANNEL
	}		kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	atomic_t	refcnt;		/* # refs (incl /dev/ppp attached) */
	int		hdrlen;		/* space to leave for headers */
	int		index;		/* interface unit / channel number */
	int		dead;		/* unit/channel has been shut down */
};

#define PF_TO_X(pf, X)		((X *)((char *)(pf) - offsetof(X, file)))

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)

#define ROUNDUP(n, x)		(((n) + (x) - 1) / (x))

/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
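 *
 * (Aside on the helpers above: PF_TO_X() is an offsetof()-based
 * "container of" conversion. Both struct ppp and struct channel embed
 * a struct ppp_file called `file', and /dev/ppp keeps a pointer to that
 * member in file->private_data, so the owning object is recovered with
 *
 *	struct ppp_file *pf = file->private_data;
 *	struct ppp *ppp = PF_TO_PPP(pf);	for pf->kind == INTERFACE
 *
 * Note also that ROUNDUP(n, x) is a ceiling division, the number of
 * x-sized pieces needed to cover n, not a round-up to a multiple of x.)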
96 */ 97struct ppp { 98 struct ppp_file file; /* stuff for read/write/poll 0 */ 99 struct file *owner; /* file that owns this unit 48 */ 100 struct list_head channels; /* list of attached channels 4c */ 101 int n_channels; /* how many channels are attached 54 */ 102 spinlock_t rlock; /* lock for receive side 58 */ 103 spinlock_t wlock; /* lock for transmit side 5c */ 104 int mru; /* max receive unit 60 */ 105 int mru_alloc; /* MAX(1500,MRU) for dev_alloc_skb() */ 106 unsigned int flags; /* control bits 64 */ 107 unsigned int xstate; /* transmit state bits 68 */ 108 unsigned int rstate; /* receive state bits 6c */ 109 int debug; /* debug flags 70 */ 110 struct slcompress *vj; /* state for VJ header compression */ 111 enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */ 112 struct sk_buff *xmit_pending; /* a packet ready to go out 88 */ 113 struct compressor *xcomp; /* transmit packet compressor 8c */ 114 void *xc_state; /* its internal state 90 */ 115 struct compressor *rcomp; /* receive decompressor 94 */ 116 void *rc_state; /* its internal state 98 */ 117 unsigned long last_xmit; /* jiffies when last pkt sent 9c */ 118 unsigned long last_recv; /* jiffies when last pkt rcvd a0 */ 119 struct net_device *dev; /* network interface device a4 */ 120#ifdef CONFIG_PPP_MULTILINK 121 int nxchan; /* next channel to send something on */ 122 u32 nxseq; /* next sequence number to send */ 123 int mrru; /* MP: max reconst. receive unit */ 124 u32 nextseq; /* MP: seq no of next packet */ 125 u32 minseq; /* MP: min of most recent seqnos */ 126 struct sk_buff_head mrq; /* MP: receive reconstruction queue */ 127#endif /* CONFIG_PPP_MULTILINK */ 128 struct net_device_stats stats; /* statistics */ 129#ifdef CONFIG_PPP_FILTER 130 struct sock_fprog pass_filter; /* filter for packets to pass */ 131 struct sock_fprog active_filter;/* filter for pkts to reset idle */ 132#endif /* CONFIG_PPP_FILTER */ 133 int xpad; /* ECP or CCP (MPPE) transmit padding */ 134}; 135 136/* 137 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC, 138 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP. 139 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR. 140 * Bits in xstate: SC_COMP_RUN 141 */ 142#define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \ 143 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \ 144 |SC_COMP_TCP|SC_REJ_COMP_TCP) 145 146/* 147 * Private data structure for each channel. 148 * This includes the data structure used for multilink. 149 */ 150struct channel { 151 struct ppp_file file; /* stuff for read/write/poll */ 152 struct list_head list; /* link in all/new_channels list */ 153 struct ppp_channel *chan; /* public channel data structure */ 154 struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */ 155 spinlock_t downl; /* protects `chan', file.xq dequeue */ 156 struct ppp *ppp; /* ppp unit we're connected to */ 157 struct list_head clist; /* link in list of channels per unit */ 158 rwlock_t upl; /* protects `ppp' */ 159#ifdef CONFIG_PPP_MULTILINK 160 u8 avail; /* flag used in multilink stuff */ 161 u8 had_frag; /* >= 1 fragments have been sent */ 162 u32 lastseq; /* MP: last sequence # received */ 163#endif /* CONFIG_PPP_MULTILINK */ 164}; 165 166/* 167 * SMP locking issues: 168 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels 169 * list and the ppp.n_channels field, you need to take both locks 170 * before you modify them. 
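 * For example, attaching a channel updates both the list and the count
 * while holding both locks (see the ppp_lock()/ppp_unlock() helpers
 * further down); roughly:
 *
 *	ppp_lock(ppp);
 *	list_add_tail(&pch->clist, &ppp->channels);
 *	++ppp->n_channels;
 *	ppp_unlock(ppp);
 *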
171 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock -> 172 * channel.downl. 173 */ 174 175/* 176 * A cardmap represents a mapping from unsigned integers to pointers, 177 * and provides a fast "find lowest unused number" operation. 178 * It uses a broad (32-way) tree with a bitmap at each level. 179 * It is designed to be space-efficient for small numbers of entries 180 * and time-efficient for large numbers of entries. 181 */ 182#define CARDMAP_ORDER 5 183#define CARDMAP_WIDTH (1U << CARDMAP_ORDER) 184#define CARDMAP_MASK (CARDMAP_WIDTH - 1) 185 186struct cardmap { 187 int shift; 188 unsigned long inuse; 189 struct cardmap *parent; 190 void *ptr[CARDMAP_WIDTH]; 191}; 192static void *cardmap_get(struct cardmap *map, unsigned int nr); 193static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr); 194static unsigned int cardmap_find_first_free(struct cardmap *map); 195static void cardmap_destroy(struct cardmap **map); 196 197/* 198 * all_ppp_sem protects the all_ppp_units mapping. 199 * It also ensures that finding a ppp unit in the all_ppp_units map 200 * and updating its file.refcnt field is atomic. 201 */ 202static DECLARE_MUTEX(all_ppp_sem); 203static struct cardmap *all_ppp_units; 204static atomic_t ppp_unit_count = ATOMIC_INIT(0); 205 206/* 207 * all_channels_lock protects all_channels and last_channel_index, 208 * and the atomicity of find a channel and updating its file.refcnt 209 * field. 210 */ 211static spinlock_t all_channels_lock = SPIN_LOCK_UNLOCKED; 212static LIST_HEAD(all_channels); 213static LIST_HEAD(new_channels); 214static int last_channel_index; 215static atomic_t channel_count = ATOMIC_INIT(0); 216 217/* Get the PPP protocol number from a skb */ 218#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) 219 220/* We limit the length of ppp->file.rq to this (arbitrary) value */ 221#define PPP_MAX_RQLEN 32 222 223/* 224 * Maximum number of multilink fragments queued up. 225 * This has to be large enough to cope with the maximum latency of 226 * the slowest channel relative to the others. Strictly it should 227 * depend on the number of channels and their characteristics. 228 */ 229#define PPP_MP_MAX_QLEN 128 230 231/* Multilink header bits. */ 232#define B 0x80 /* this fragment begins a packet */ 233#define E 0x40 /* this fragment ends a packet */ 234 235/* Compare multilink sequence numbers (assumed to be 32 bits wide) */ 236#define seq_before(a, b) ((s32)((a) - (b)) < 0) 237#define seq_after(a, b) ((s32)((a) - (b)) > 0) 238 239/* Prototypes. 
*/ 240static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, 241 unsigned int cmd, unsigned long arg); 242static void ppp_xmit_process(struct ppp *ppp); 243static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 244static void ppp_push(struct ppp *ppp); 245static void ppp_channel_push(struct channel *pch); 246static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, 247 struct channel *pch); 248static void ppp_receive_error(struct ppp *ppp); 249static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb); 250static struct sk_buff *ppp_decompress_frame(struct ppp *ppp, 251 struct sk_buff *skb); 252#ifdef CONFIG_PPP_MULTILINK 253static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, 254 struct channel *pch); 255static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb); 256static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp); 257static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb); 258#endif /* CONFIG_PPP_MULTILINK */ 259static int ppp_set_compress(struct ppp *ppp, unsigned long arg); 260static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound); 261static void ppp_ccp_closed(struct ppp *ppp); 262static struct compressor *find_compressor(int type); 263static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); 264static struct ppp *ppp_create_interface(int unit, int *retp); 265static void init_ppp_file(struct ppp_file *pf, int kind); 266static void ppp_shutdown_interface(struct ppp *ppp); 267static void ppp_destroy_interface(struct ppp *ppp); 268static struct ppp *ppp_find_unit(int unit); 269static struct channel *ppp_find_channel(int unit); 270static int ppp_connect_channel(struct channel *pch, int unit); 271static int ppp_disconnect_channel(struct channel *pch); 272static void ppp_destroy_channel(struct channel *pch); 273 274/* Translates a PPP protocol number to a NP index (NP == network protocol) */ 275static inline int proto_to_npindex(int proto) 276{ 277 switch (proto) { 278 case PPP_IP: 279 return NP_IP; 280 case PPP_IPV6: 281 return NP_IPV6; 282 case PPP_IPX: 283 return NP_IPX; 284 case PPP_AT: 285 return NP_AT; 286 } 287 return -EINVAL; 288} 289 290/* Translates an NP index into a PPP protocol number */ 291static const int npindex_to_proto[NUM_NP] = { 292 PPP_IP, 293 PPP_IPV6, 294 PPP_IPX, 295 PPP_AT, 296}; 297 298/* Translates an ethertype into an NP index */ 299static inline int ethertype_to_npindex(int ethertype) 300{ 301 switch (ethertype) { 302 case ETH_P_IP: 303 return NP_IP; 304 case ETH_P_IPV6: 305 return NP_IPV6; 306 case ETH_P_IPX: 307 return NP_IPX; 308 case ETH_P_PPPTALK: 309 case ETH_P_ATALK: 310 return NP_AT; 311 } 312 return -1; 313} 314 315/* Translates an NP index into an ethertype */ 316static const int npindex_to_ethertype[NUM_NP] = { 317 ETH_P_IP, 318 ETH_P_IPV6, 319 ETH_P_IPX, 320 ETH_P_PPPTALK, 321}; 322 323/* 324 * Locking shorthand. 325 */ 326#define ppp_xmit_lock(ppp) spin_lock_bh(&(ppp)->wlock) 327#define ppp_xmit_unlock(ppp) spin_unlock_bh(&(ppp)->wlock) 328#define ppp_recv_lock(ppp) spin_lock_bh(&(ppp)->rlock) 329#define ppp_recv_unlock(ppp) spin_unlock_bh(&(ppp)->rlock) 330#define ppp_lock(ppp) do { ppp_xmit_lock(ppp); \ 331 ppp_recv_lock(ppp); } while (0) 332#define ppp_unlock(ppp) do { ppp_recv_unlock(ppp); \ 333 ppp_xmit_unlock(ppp); } while (0) 334 335/* 336 * /dev/ppp device routines. 337 * The /dev/ppp device is used by pppd to control the ppp unit. 338 * It supports the read, write, ioctl and poll functions. 
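 *
 * As a rough user-space sketch (error handling omitted), a pppd-like
 * daemon drives it along these lines:
 *
 *	fd = open("/dev/ppp", O_RDWR);		requires CAP_NET_ADMIN
 *	unit = -1;				-1: pick a free unit number
 *	ioctl(fd, PPPIOCNEWUNIT, &unit);	unit number is written back
 *	write(fd, frame, len);			send a control frame
 *	read(fd, buf, sizeof(buf));		receive control frames
 *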
339 * Open instances of /dev/ppp can be in one of three states: 340 * unattached, attached to a ppp unit, or attached to a ppp channel. 341 */ 342static int ppp_open(struct inode *inode, struct file *file) 343{ 344 /* 345 * This could (should?) be enforced by the permissions on /dev/ppp. 346 */ 347 if (!capable(CAP_NET_ADMIN)) 348 return -EPERM; 349 return 0; 350} 351 352static int ppp_release(struct inode *inode, struct file *file) 353{ 354 struct ppp_file *pf = file->private_data; 355 struct ppp *ppp; 356 357 if (pf != 0) { 358 file->private_data = 0; 359 if (pf->kind == INTERFACE) { 360 ppp = PF_TO_PPP(pf); 361 if (file == ppp->owner) 362 ppp_shutdown_interface(ppp); 363 } 364 if (atomic_dec_and_test(&pf->refcnt)) { 365 switch (pf->kind) { 366 case INTERFACE: 367 ppp_destroy_interface(PF_TO_PPP(pf)); 368 break; 369 case CHANNEL: 370 ppp_destroy_channel(PF_TO_CHANNEL(pf)); 371 break; 372 } 373 } 374 } 375 return 0; 376} 377 378static ssize_t ppp_read(struct file *file, char *buf, 379 size_t count, loff_t *ppos) 380{ 381 struct ppp_file *pf = file->private_data; 382 DECLARE_WAITQUEUE(wait, current); 383 ssize_t ret = 0; 384 struct sk_buff *skb = 0; 385 386 if (pf == 0) 387 return -ENXIO; 388 add_wait_queue(&pf->rwait, &wait); 389 for (;;) { 390 set_current_state(TASK_INTERRUPTIBLE); 391 skb = skb_dequeue(&pf->rq); 392 if (skb) 393 break; 394 ret = 0; 395 if (pf->dead) 396 break; 397 ret = -EAGAIN; 398 if (file->f_flags & O_NONBLOCK) 399 break; 400 ret = -ERESTARTSYS; 401 if (signal_pending(current)) 402 break; 403 schedule(); 404 } 405 set_current_state(TASK_RUNNING); 406 remove_wait_queue(&pf->rwait, &wait); 407 408 if (skb == 0) 409 goto err1; 410 411 ret = -EOVERFLOW; 412 if (skb->len > count) 413 goto err2; 414 ret = -EFAULT; 415 if (copy_to_user(buf, skb->data, skb->len)) 416 goto err2; 417 ret = skb->len; 418 419 err2: 420 kfree_skb(skb); 421 err1: 422 return ret; 423} 424 425static ssize_t ppp_write(struct file *file, const char *buf, 426 size_t count, loff_t *ppos) 427{ 428 struct ppp_file *pf = file->private_data; 429 struct sk_buff *skb; 430 ssize_t ret; 431 432 if (pf == 0) 433 return -ENXIO; 434 ret = -ENOMEM; 435 skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); 436 if (skb == 0) 437 goto err1; 438 skb_reserve(skb, pf->hdrlen); 439 ret = -EFAULT; 440 if (copy_from_user(skb_put(skb, count), buf, count)) { 441 kfree_skb(skb); 442 goto err1; 443 } 444 445 skb_queue_tail(&pf->xq, skb); 446 447 switch (pf->kind) { 448 case INTERFACE: 449 ppp_xmit_process(PF_TO_PPP(pf)); 450 break; 451 case CHANNEL: 452 ppp_channel_push(PF_TO_CHANNEL(pf)); 453 break; 454 } 455 456 ret = count; 457 458 err1: 459 return ret; 460} 461 462/* No kernel lock - fine */ 463static unsigned int ppp_poll(struct file *file, poll_table *wait) 464{ 465 struct ppp_file *pf = file->private_data; 466 unsigned int mask; 467 468 if (pf == 0) 469 return 0; 470 poll_wait(file, &pf->rwait, wait); 471 mask = POLLOUT | POLLWRNORM; 472 if (skb_peek(&pf->rq) != 0) 473 mask |= POLLIN | POLLRDNORM; 474 if (pf->dead) 475 mask |= POLLHUP; 476 return mask; 477} 478 479static int ppp_ioctl(struct inode *inode, struct file *file, 480 unsigned int cmd, unsigned long arg) 481{ 482 struct ppp_file *pf = file->private_data; 483 struct ppp *ppp; 484 int err = -EFAULT, val, val2, i; 485 struct ppp_idle idle; 486 struct npioctl npi; 487 int unit, cflags; 488 struct slcompress *vj; 489 490 if (pf == 0) 491 return ppp_unattached_ioctl(pf, file, cmd, arg); 492 493 if (cmd == PPPIOCDETACH) { 494 /* 495 * We have to be careful here... 
if the file descriptor 496 * has been dup'd, we could have another process in the 497 * middle of a poll using the same file *, so we had 498 * better not free the interface data structures - 499 * instead we fail the ioctl. Even in this case, we 500 * shut down the interface if we are the owner of it. 501 * Actually, we should get rid of PPPIOCDETACH, userland 502 * (i.e. pppd) could achieve the same effect by closing 503 * this fd and reopening /dev/ppp. 504 */ 505 err = -EINVAL; 506 if (pf->kind == INTERFACE) { 507 ppp = PF_TO_PPP(pf); 508 if (file == ppp->owner) 509 ppp_shutdown_interface(ppp); 510 } 511 if (atomic_read(&file->f_count) <= 2) { 512 ppp_release(inode, file); 513 err = 0; 514 } else 515 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n", 516 atomic_read(&file->f_count)); 517 return err; 518 } 519 520 if (pf->kind == CHANNEL) { 521 struct channel *pch = PF_TO_CHANNEL(pf); 522 struct ppp_channel *chan; 523 524 switch (cmd) { 525 case PPPIOCCONNECT: 526 if (get_user(unit, (int *) arg)) 527 break; 528 err = ppp_connect_channel(pch, unit); 529 break; 530 531 case PPPIOCDISCONN: 532 err = ppp_disconnect_channel(pch); 533 break; 534 535 default: 536 down_read(&pch->chan_sem); 537 chan = pch->chan; 538 err = -ENOTTY; 539 if (chan && chan->ops->ioctl) 540 err = chan->ops->ioctl(chan, cmd, arg); 541 up_read(&pch->chan_sem); 542 } 543 return err; 544 } 545 546 if (pf->kind != INTERFACE) { 547 /* can't happen */ 548 printk(KERN_ERR "PPP: not interface or channel??\n"); 549 return -EINVAL; 550 } 551 552 ppp = PF_TO_PPP(pf); 553 switch (cmd) { 554 case PPPIOCSMRU: 555 if (get_user(val, (int *) arg)) 556 break; 557 ppp->mru_alloc = ppp->mru = val; 558 if (ppp->mru_alloc < PPP_MRU) 559 ppp->mru_alloc = PPP_MRU; /* increase for broken peers */ 560 err = 0; 561 break; 562 563 case PPPIOCSFLAGS: 564 if (get_user(val, (int *) arg)) 565 break; 566 ppp_lock(ppp); 567 cflags = ppp->flags & ~val; 568 ppp->flags = val & SC_FLAG_BITS; 569 ppp_unlock(ppp); 570 if (cflags & SC_CCP_OPEN) 571 ppp_ccp_closed(ppp); 572 err = 0; 573 break; 574 575 case PPPIOCGFLAGS: 576 val = ppp->flags | ppp->xstate | ppp->rstate; 577 if (put_user(val, (int *) arg)) 578 break; 579 err = 0; 580 break; 581 582 case PPPIOCSCOMPRESS: 583 err = ppp_set_compress(ppp, arg); 584 break; 585 586 case PPPIOCGUNIT: 587 if (put_user(ppp->file.index, (int *) arg)) 588 break; 589 err = 0; 590 break; 591 592 case PPPIOCSDEBUG: 593 if (get_user(val, (int *) arg)) 594 break; 595 ppp->debug = val; 596 err = 0; 597 break; 598 599 case PPPIOCGDEBUG: 600 if (put_user(ppp->debug, (int *) arg)) 601 break; 602 err = 0; 603 break; 604 605 case PPPIOCGIDLE: 606 idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ; 607 idle.recv_idle = (jiffies - ppp->last_recv) / HZ; 608 if (copy_to_user((void *) arg, &idle, sizeof(idle))) 609 break; 610 err = 0; 611 break; 612 613 case PPPIOCSMAXCID: 614 if (get_user(val, (int *) arg)) 615 break; 616 val2 = 15; 617 if ((val >> 16) != 0) { 618 val2 = val >> 16; 619 val &= 0xffff; 620 } 621 vj = slhc_init(val2+1, val+1); 622 if (vj == 0) { 623 printk(KERN_ERR "PPP: no memory (VJ compressor)\n"); 624 err = -ENOMEM; 625 break; 626 } 627 ppp_lock(ppp); 628 if (ppp->vj != 0) 629 slhc_free(ppp->vj); 630 ppp->vj = vj; 631 ppp_unlock(ppp); 632 err = 0; 633 break; 634 635 case PPPIOCGNPMODE: 636 case PPPIOCSNPMODE: 637 if (copy_from_user(&npi, (void *) arg, sizeof(npi))) 638 break; 639 err = proto_to_npindex(npi.protocol); 640 if (err < 0) 641 break; 642 i = err; 643 if (cmd == PPPIOCGNPMODE) { 644 err = -EFAULT; 645 
npi.mode = ppp->npmode[i]; 646 if (copy_to_user((void *) arg, &npi, sizeof(npi))) 647 break; 648 } else { 649 ppp->npmode[i] = npi.mode; 650 /* we may be able to transmit more packets now (??) */ 651 netif_wake_queue(ppp->dev); 652 } 653 err = 0; 654 break; 655 656#ifdef CONFIG_PPP_FILTER 657 case PPPIOCSPASS: 658 case PPPIOCSACTIVE: 659 { 660 struct sock_fprog uprog, *filtp; 661 struct sock_filter *code = NULL; 662 int len; 663 664 if (copy_from_user(&uprog, (void *) arg, sizeof(uprog))) 665 break; 666 if (uprog.len > 0 && uprog.len < 65536) { 667 err = -ENOMEM; 668 len = uprog.len * sizeof(struct sock_filter); 669 code = kmalloc(len, GFP_KERNEL); 670 if (code == 0) 671 break; 672 err = -EFAULT; 673 if (copy_from_user(code, uprog.filter, len)) 674 break; 675 err = sk_chk_filter(code, uprog.len); 676 if (err) { 677 kfree(code); 678 break; 679 } 680 } 681 filtp = (cmd == PPPIOCSPASS)? &ppp->pass_filter: &ppp->active_filter; 682 ppp_lock(ppp); 683 if (filtp->filter) 684 kfree(filtp->filter); 685 filtp->filter = code; 686 filtp->len = uprog.len; 687 ppp_unlock(ppp); 688 err = 0; 689 break; 690 } 691#endif /* CONFIG_PPP_FILTER */ 692 693#ifdef CONFIG_PPP_MULTILINK 694 case PPPIOCSMRRU: 695 if (get_user(val, (int *) arg)) 696 break; 697 ppp_recv_lock(ppp); 698 ppp->mrru = val; 699 ppp_recv_unlock(ppp); 700 err = 0; 701 break; 702#endif /* CONFIG_PPP_MULTILINK */ 703 704 default: 705 err = -ENOTTY; 706 } 707 708 return err; 709} 710 711static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, 712 unsigned int cmd, unsigned long arg) 713{ 714 int unit, err = -EFAULT; 715 struct ppp *ppp; 716 struct channel *chan; 717 718 switch (cmd) { 719 case PPPIOCNEWUNIT: 720 /* Create a new ppp unit */ 721 if (get_user(unit, (int *) arg)) 722 break; 723 ppp = ppp_create_interface(unit, &err); 724 if (ppp == 0) 725 break; 726 file->private_data = &ppp->file; 727 ppp->owner = file; 728 err = -EFAULT; 729 if (put_user(ppp->file.index, (int *) arg)) 730 break; 731 err = 0; 732 break; 733 734 case PPPIOCATTACH: 735 /* Attach to an existing ppp unit */ 736 if (get_user(unit, (int *) arg)) 737 break; 738 down(&all_ppp_sem); 739 err = -ENXIO; 740 ppp = ppp_find_unit(unit); 741 if (ppp != 0) { 742 atomic_inc(&ppp->file.refcnt); 743 file->private_data = &ppp->file; 744 err = 0; 745 } 746 up(&all_ppp_sem); 747 break; 748 749 case PPPIOCATTCHAN: 750 if (get_user(unit, (int *) arg)) 751 break; 752 spin_lock_bh(&all_channels_lock); 753 err = -ENXIO; 754 chan = ppp_find_channel(unit); 755 if (chan != 0) { 756 atomic_inc(&chan->file.refcnt); 757 file->private_data = &chan->file; 758 err = 0; 759 } 760 spin_unlock_bh(&all_channels_lock); 761 break; 762 763 default: 764 err = -ENOTTY; 765 } 766 return err; 767} 768 769static struct file_operations ppp_device_fops = { 770 owner: THIS_MODULE, 771 read: ppp_read, 772 write: ppp_write, 773 poll: ppp_poll, 774 ioctl: ppp_ioctl, 775 open: ppp_open, 776 release: ppp_release 777}; 778 779#define PPP_MAJOR 108 780 781static devfs_handle_t devfs_handle; 782 783/* Called at boot time if ppp is compiled into the kernel, 784 or at module load time (from init_module) if compiled as a module. 
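   Without devfs the /dev/ppp node is normally created by hand for
   PPP_MAJOR (108); from user space that amounts to something like
   (illustrative only):

	mknod("/dev/ppp", S_IFCHR | S_IRUSR | S_IWUSR, makedev(108, 0));
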
*/ 785int __init ppp_init(void) 786{ 787 int err; 788 789 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); 790 err = devfs_register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); 791 if (err) 792 printk(KERN_ERR "failed to register PPP device (%d)\n", err); 793 devfs_handle = devfs_register(NULL, "ppp", DEVFS_FL_DEFAULT, 794 PPP_MAJOR, 0, 795 S_IFCHR | S_IRUSR | S_IWUSR, 796 &ppp_device_fops, NULL); 797 798 return 0; 799} 800 801/* 802 * Network interface unit routines. 803 */ 804static int 805ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) 806{ 807 struct ppp *ppp = (struct ppp *) dev->priv; 808 int npi, proto; 809 unsigned char *pp; 810 811 npi = ethertype_to_npindex(ntohs(skb->protocol)); 812 if (npi < 0) 813 goto err1; 814 815 /* Drop, accept or reject the packet */ 816 switch (ppp->npmode[npi]) { 817 case NPMODE_PASS: 818 break; 819 case NPMODE_QUEUE: 820 /* it would be nice to have a way to tell the network 821 system to queue this one up for later. */ 822 goto err1; 823 case NPMODE_DROP: 824 case NPMODE_ERROR: 825 goto err1; 826 } 827 828 /* Put the 2-byte PPP protocol number on the front, 829 making sure there is room for the address and control fields. */ 830 if (skb_headroom(skb) < PPP_HDRLEN) { 831 struct sk_buff *ns; 832 833 ns = alloc_skb(skb->len + dev->hard_header_len, GFP_ATOMIC); 834 if (ns == 0) 835 goto err1; 836 skb_reserve(ns, dev->hard_header_len); 837 memcpy(skb_put(ns, skb->len), skb->data, skb->len); 838 kfree_skb(skb); 839 skb = ns; 840 } 841 pp = skb_push(skb, 2); 842 proto = npindex_to_proto[npi]; 843 pp[0] = proto >> 8; 844 pp[1] = proto; 845 846 netif_stop_queue(dev); 847 skb_queue_tail(&ppp->file.xq, skb); 848 ppp_xmit_process(ppp); 849 return 0; 850 851 err1: 852 kfree_skb(skb); 853 ++ppp->stats.tx_dropped; 854 return 0; 855} 856 857static struct net_device_stats * 858ppp_net_stats(struct net_device *dev) 859{ 860 struct ppp *ppp = (struct ppp *) dev->priv; 861 862 return &ppp->stats; 863} 864 865static int 866ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 867{ 868 struct ppp *ppp = dev->priv; 869 int err = -EFAULT; 870 void *addr = (void *) ifr->ifr_ifru.ifru_data; 871 struct ppp_stats stats; 872 struct ppp_comp_stats cstats; 873 char *vers; 874 875 switch (cmd) { 876 case SIOCGPPPSTATS: 877 ppp_get_stats(ppp, &stats); 878 if (copy_to_user(addr, &stats, sizeof(stats))) 879 break; 880 err = 0; 881 break; 882 883 case SIOCGPPPCSTATS: 884 memset(&cstats, 0, sizeof(cstats)); 885 if (ppp->xc_state != 0) 886 ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c); 887 if (ppp->rc_state != 0) 888 ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d); 889 if (copy_to_user(addr, &cstats, sizeof(cstats))) 890 break; 891 err = 0; 892 break; 893 894 case SIOCGPPPVER: 895 vers = PPP_VERSION; 896 if (copy_to_user(addr, vers, strlen(vers) + 1)) 897 break; 898 err = 0; 899 break; 900 901 default: 902 err = -EINVAL; 903 } 904 905 return err; 906} 907 908static int 909ppp_net_init(struct net_device *dev) 910{ 911 dev->hard_header_len = PPP_HDRLEN; 912 dev->mtu = PPP_MTU; 913 dev->hard_start_xmit = ppp_start_xmit; 914 dev->get_stats = ppp_net_stats; 915 dev->do_ioctl = ppp_net_ioctl; 916 dev->addr_len = 0; 917 dev->tx_queue_len = 3; 918 dev->type = ARPHRD_PPP; 919 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 920 return 0; 921} 922 923/* 924 * Transmit-side routines. 925 */ 926 927/* 928 * Called to do any work queued up on the transmit side 929 * that can now be done. 
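 *
 * The normal transmit flow through this file is roughly:
 *
 *	ppp_start_xmit() or ppp_write()
 *	    -> skb_queue_tail(&ppp->file.xq, skb)
 *	    -> ppp_xmit_process()	(takes the xmit lock)
 *	        -> ppp_send_frame()	(VJ / CCP compression)
 *	            -> ppp_push()	(hand the skb to one channel, or to
 *	                                 ppp_mp_explode() for multilink)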
930 */ 931static void 932ppp_xmit_process(struct ppp *ppp) 933{ 934 struct sk_buff *skb; 935 936 ppp_xmit_lock(ppp); 937 if (ppp->dev != 0) { 938 ppp_push(ppp); 939 while (ppp->xmit_pending == 0 940 && (skb = skb_dequeue(&ppp->file.xq)) != 0) 941 ppp_send_frame(ppp, skb); 942 /* If there's no work left to do, tell the core net 943 code that we can accept some more. */ 944 if (ppp->xmit_pending == 0 && skb_peek(&ppp->file.xq) == 0) 945 netif_wake_queue(ppp->dev); 946 } 947 ppp_xmit_unlock(ppp); 948} 949 950/* 951 * Compress and send a frame. 952 * The caller should have locked the xmit path, 953 * and xmit_pending should be 0. 954 */ 955static void 956ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) 957{ 958 int proto = PPP_PROTO(skb); 959 struct sk_buff *new_skb; 960 int len; 961 unsigned char *cp; 962 963 if (proto < 0x8000) { 964#ifdef CONFIG_PPP_FILTER 965 /* check if we should pass this packet */ 966 /* the filter instructions are constructed assuming 967 a four-byte PPP header on each packet */ 968 *skb_push(skb, 2) = 1; 969 if (ppp->pass_filter.filter 970 && sk_run_filter(skb, ppp->pass_filter.filter, 971 ppp->pass_filter.len) == 0) { 972 if (ppp->debug & 1) { 973 printk(KERN_DEBUG "PPP: outbound frame not passed\n"); 974 kfree_skb(skb); 975 return; 976 } 977 } 978 /* if this packet passes the active filter, record the time */ 979 if (!(ppp->active_filter.filter 980 && sk_run_filter(skb, ppp->active_filter.filter, 981 ppp->active_filter.len) == 0)) 982 ppp->last_xmit = jiffies; 983 skb_pull(skb, 2); 984#else 985 /* for data packets, record the time */ 986 ppp->last_xmit = jiffies; 987#endif /* CONFIG_PPP_FILTER */ 988 } 989 990 ++ppp->stats.tx_packets; 991 ppp->stats.tx_bytes += skb->len - 2; 992 993 switch (proto) { 994 case PPP_IP: 995 if (ppp->vj == 0 || (ppp->flags & SC_COMP_TCP) == 0) 996 break; 997 /* try to do VJ TCP header compression */ 998 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2, 999 GFP_ATOMIC); 1000 if (new_skb == 0) { 1001 printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n"); 1002 goto drop; 1003 } 1004 skb_reserve(new_skb, ppp->dev->hard_header_len - 2); 1005 cp = skb->data + 2; 1006 len = slhc_compress(ppp->vj, cp, skb->len - 2, 1007 new_skb->data + 2, &cp, 1008 !(ppp->flags & SC_NO_TCP_CCID)); 1009 if (cp == skb->data + 2) { 1010 /* didn't compress */ 1011 kfree_skb(new_skb); 1012 } else { 1013 if (cp[0] & SL_TYPE_COMPRESSED_TCP) { 1014 proto = PPP_VJC_COMP; 1015 cp[0] &= ~SL_TYPE_COMPRESSED_TCP; 1016 } else { 1017 proto = PPP_VJC_UNCOMP; 1018 cp[0] = skb->data[2]; 1019 } 1020 kfree_skb(skb); 1021 skb = new_skb; 1022 cp = skb_put(skb, len + 2); 1023 cp[0] = 0; 1024 cp[1] = proto; 1025 } 1026 break; 1027 1028 case PPP_CCP: 1029 /* peek at outbound CCP frames */ 1030 ppp_ccp_peek(ppp, skb, 0); 1031 /* 1032 * When LZS or MPPE/MPPC is negotiated we don't send 1033 * CCP_RESETACK after receiving CCP_RESETREQ; in fact pppd 1034 * sends such a packet but we silently discard it here 1035 */ 1036 if (CCP_CODE(skb->data+2) == CCP_RESETACK 1037 && (ppp->xcomp->compress_proto == CI_MPPE 1038 || ppp->xcomp->compress_proto == CI_LZS)) { 1039 --ppp->stats.tx_packets; 1040 ppp->stats.tx_bytes -= skb->len - 2; 1041 kfree_skb(skb); 1042 return; 1043 } 1044 break; 1045 } 1046 1047 /* try to do packet compression */ 1048 if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state != 0 1049 && proto != PPP_LCP && proto != PPP_CCP) { 1050 int comp_ovhd = 0; 1051 /* because of possible data expansion when MPPC or LZS 1052 is used, allocate compressor's buffer about 12.5% bigger 
1053 than MTU */ 1054 if (ppp->xcomp->compress_proto == CI_MPPE) 1055 comp_ovhd = (((ppp->dev->mtu * 9) / 8) + 1); 1056 else if (ppp->xcomp->compress_proto == CI_LZS) 1057 comp_ovhd = (((ppp->dev->mtu * 9) / 8) + 1) + LZS_OVHD; 1058 new_skb = alloc_skb(ppp->dev->mtu + ppp->dev->hard_header_len 1059 + ppp->xpad + comp_ovhd, GFP_ATOMIC); 1060 if (new_skb == 0) { 1061 printk(KERN_ERR "PPP: no memory (comp pkt)\n"); 1062 goto drop; 1063 } 1064 if (ppp->dev->hard_header_len > PPP_HDRLEN) 1065 skb_reserve(new_skb, 1066 ppp->dev->hard_header_len - PPP_HDRLEN); 1067 1068 /* compressor still expects A/C bytes in hdr */ 1069 len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2, 1070 new_skb->data, skb->len + 2, 1071 ppp->dev->mtu + ppp->xpad 1072 + PPP_HDRLEN); 1073 if (len > 0 && (ppp->flags & SC_CCP_UP)) { 1074 kfree_skb(skb); 1075 skb = new_skb; 1076 skb_put(skb, len); 1077 skb_pull(skb, 2); /* pull off A/C bytes */ 1078 } else if (len == 0) { 1079 /* didn't compress, or CCP not up yet */ 1080 kfree_skb(new_skb); 1081 } else { 1082 /* 1083 * (len < 0) 1084 * MPPE requires that we do not send unencrypted 1085 * frames. The compressor will return -1 if we 1086 * should drop the frame. We cannot simply test 1087 * the compress_proto because MPPE and MPPC share 1088 * the same number. 1089 */ 1090 printk(KERN_ERR "ppp: compressor dropped pkt\n"); 1091 kfree_skb(new_skb); 1092 goto drop; 1093 } 1094 } 1095 1096 /* 1097 * If we are waiting for traffic (demand dialling), 1098 * queue it up for pppd to receive. 1099 */ 1100 if (ppp->flags & SC_LOOP_TRAFFIC) { 1101 if (ppp->file.rq.qlen > PPP_MAX_RQLEN) 1102 goto drop; 1103 skb_queue_tail(&ppp->file.rq, skb); 1104 wake_up_interruptible(&ppp->file.rwait); 1105 return; 1106 } 1107 1108 ppp->xmit_pending = skb; 1109 ppp_push(ppp); 1110 return; 1111 1112 drop: 1113 kfree_skb(skb); 1114 ++ppp->stats.tx_errors; 1115} 1116 1117/* 1118 * Try to send the frame in xmit_pending. 1119 * The caller should have the xmit path locked. 1120 */ 1121static void 1122ppp_push(struct ppp *ppp) 1123{ 1124 struct list_head *list; 1125 struct channel *pch; 1126 struct sk_buff *skb = ppp->xmit_pending; 1127 1128 if (skb == 0) 1129 return; 1130 1131 list = &ppp->channels; 1132 if (list_empty(list)) { 1133 /* nowhere to send the packet, just drop it */ 1134 ppp->xmit_pending = 0; 1135 kfree_skb(skb); 1136 return; 1137 } 1138 1139 if ((ppp->flags & SC_MULTILINK) == 0) { 1140 /* not doing multilink: send it down the first channel */ 1141 list = list->next; 1142 pch = list_entry(list, struct channel, clist); 1143 1144 spin_lock_bh(&pch->downl); 1145 if (pch->chan) { 1146 if (pch->chan->ops->start_xmit(pch->chan, skb)) 1147 ppp->xmit_pending = 0; 1148 } else { 1149 /* channel got unregistered */ 1150 kfree_skb(skb); 1151 ppp->xmit_pending = 0; 1152 } 1153 spin_unlock_bh(&pch->downl); 1154 return; 1155 } 1156 1157#ifdef CONFIG_PPP_MULTILINK 1158 /* Multilink: fragment the packet over as many links 1159 as can take the packet at the moment. */ 1160 if (!ppp_mp_explode(ppp, skb)) 1161 return; 1162#endif /* CONFIG_PPP_MULTILINK */ 1163 1164 ppp->xmit_pending = 0; 1165 kfree_skb(skb); 1166} 1167 1168#ifdef CONFIG_PPP_MULTILINK 1169/* 1170 * Divide a packet to be transmitted into fragments and 1171 * send them out the individual links. 
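 *
 * Rough worked example: with three channels that can take data right
 * away and a 1500-byte packet, nch = 3 and
 * fragsize = ROUNDUP(1500, 3) = 500, so each channel normally gets one
 * fragment of about 500 bytes; the first fragment carries the B (begin)
 * bit and the last carries the E (end) bit in its multilink header.
 * A channel whose MTU is smaller than fragsize simply gets several
 * smaller fragments instead.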
1172 */ 1173static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) 1174{ 1175 int nch, len, fragsize; 1176 int i, bits, hdrlen, mtu; 1177 int flen, fnb; 1178 unsigned char *p, *q; 1179 struct list_head *list; 1180 struct channel *pch; 1181 struct sk_buff *frag; 1182 struct ppp_channel *chan; 1183 1184 nch = 0; 1185 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1186 list = &ppp->channels; 1187 while ((list = list->next) != &ppp->channels) { 1188 pch = list_entry(list, struct channel, clist); 1189 nch += pch->avail = (skb_queue_len(&pch->file.xq) == 0); 1190 /* 1191 * If a channel hasn't had a fragment yet, it has to get 1192 * one before we send any fragments on later channels. 1193 * If it can't take a fragment now, don't give any 1194 * to subsequent channels. 1195 */ 1196 if (!pch->had_frag && !pch->avail) { 1197 while ((list = list->next) != &ppp->channels) { 1198 pch = list_entry(list, struct channel, clist); 1199 pch->avail = 0; 1200 } 1201 break; 1202 } 1203 } 1204 if (nch == 0) 1205 return 0; /* can't take now, leave it in xmit_pending */ 1206 1207 p = skb->data; 1208 len = skb->len; 1209 if (*p == 0) { 1210 ++p; 1211 --len; 1212 } 1213 1214 /* decide on fragment size */ 1215 fragsize = len; 1216 if (nch > 1) { 1217 int maxch = ROUNDUP(len, MIN_FRAG_SIZE); 1218 if (nch > maxch) 1219 nch = maxch; 1220 fragsize = ROUNDUP(fragsize, nch); 1221 } 1222 1223 /* skip to the channel after the one we last used 1224 and start at that one */ 1225 for (i = 0; i < ppp->nxchan; ++i) { 1226 list = list->next; 1227 if (list == &ppp->channels) { 1228 i = 0; 1229 break; 1230 } 1231 } 1232 1233 /* create a fragment for each channel */ 1234 bits = B; 1235 do { 1236 list = list->next; 1237 if (list == &ppp->channels) { 1238 i = 0; 1239 continue; 1240 } 1241 pch = list_entry(list, struct channel, clist); 1242 ++i; 1243 if (!pch->avail) 1244 continue; 1245 1246 /* check the channel's mtu and whether it is still attached. */ 1247 spin_lock_bh(&pch->downl); 1248 if (pch->chan == 0 || (mtu = pch->chan->mtu) < hdrlen) { 1249 /* can't use this channel */ 1250 spin_unlock_bh(&pch->downl); 1251 pch->avail = 0; 1252 if (--nch == 0) 1253 break; 1254 continue; 1255 } 1256 1257 /* 1258 * We have to create multiple fragments for this channel 1259 * if fragsize is greater than the channel's mtu. 
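	 *
	 * For reference, the header built below is, with long sequence
	 * numbers (MPHDRLEN == 6 bytes):
	 *
	 *	q[0..1] = PPP_MP protocol number
	 *	q[2]    = B/E bits
	 *	q[3..5] = 24-bit sequence number
	 *
	 * With short sequence numbers (MPHDRLEN_SSN == 4 bytes) the B/E
	 * bits and the top 4 bits of the 12-bit sequence number share
	 * q[2], and the low 8 bits go in q[3].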
1260 */ 1261 if (fragsize > len) 1262 fragsize = len; 1263 for (flen = fragsize; flen > 0; flen -= fnb) { 1264 fnb = flen; 1265 if (fnb > mtu + 2 - hdrlen) 1266 fnb = mtu + 2 - hdrlen; 1267 if (fnb >= len) 1268 bits |= E; 1269 frag = alloc_skb(fnb + hdrlen, GFP_ATOMIC); 1270 if (frag == 0) 1271 goto noskb; 1272 q = skb_put(frag, fnb + hdrlen); 1273 /* make the MP header */ 1274 q[0] = PPP_MP >> 8; 1275 q[1] = PPP_MP; 1276 if (ppp->flags & SC_MP_XSHORTSEQ) { 1277 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 1278 q[3] = ppp->nxseq; 1279 } else { 1280 q[2] = bits; 1281 q[3] = ppp->nxseq >> 16; 1282 q[4] = ppp->nxseq >> 8; 1283 q[5] = ppp->nxseq; 1284 } 1285 1286 /* copy the data in */ 1287 memcpy(q + hdrlen, p, fnb); 1288 1289 /* try to send it down the channel */ 1290 chan = pch->chan; 1291 if (!chan->ops->start_xmit(chan, frag)) 1292 skb_queue_tail(&pch->file.xq, frag); 1293 pch->had_frag = 1; 1294 p += fnb; 1295 len -= fnb; 1296 ++ppp->nxseq; 1297 bits = 0; 1298 } 1299 spin_unlock_bh(&pch->downl); 1300 } while (len > 0); 1301 ppp->nxchan = i; 1302 1303 return 1; 1304 1305 noskb: 1306 spin_unlock_bh(&pch->downl); 1307 if (ppp->debug & 1) 1308 printk(KERN_ERR "PPP: no memory (fragment)\n"); 1309 ++ppp->stats.tx_errors; 1310 ++ppp->nxseq; 1311 return 1; /* abandon the frame */ 1312} 1313#endif /* CONFIG_PPP_MULTILINK */ 1314 1315/* 1316 * Try to send data out on a channel. 1317 */ 1318static void 1319ppp_channel_push(struct channel *pch) 1320{ 1321 struct sk_buff *skb; 1322 struct ppp *ppp; 1323 1324 spin_lock_bh(&pch->downl); 1325 if (pch->chan != 0) { 1326 while (skb_queue_len(&pch->file.xq) > 0) { 1327 skb = skb_dequeue(&pch->file.xq); 1328 if (!pch->chan->ops->start_xmit(pch->chan, skb)) { 1329 /* put the packet back and try again later */ 1330 skb_queue_head(&pch->file.xq, skb); 1331 break; 1332 } 1333 } 1334 } else { 1335 /* channel got deregistered */ 1336 skb_queue_purge(&pch->file.xq); 1337 } 1338 spin_unlock_bh(&pch->downl); 1339 /* see if there is anything from the attached unit to be sent */ 1340 if (skb_queue_len(&pch->file.xq) == 0) { 1341 read_lock_bh(&pch->upl); 1342 ppp = pch->ppp; 1343 if (ppp != 0) 1344 ppp_xmit_process(ppp); 1345 read_unlock_bh(&pch->upl); 1346 } 1347} 1348 1349/* 1350 * Receive-side routines. 
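 *
 * The normal receive flow is roughly the mirror image of transmit:
 *
 *	channel driver -> ppp_input()
 *	    -> ppp_do_recv()			(takes the recv lock)
 *	        -> ppp_receive_frame()
 *	            -> ppp_receive_mp_frame()	 (multilink reassembly)
 *	            -> ppp_receive_nonmp_frame() (VJ/CCP handling, then
 *	                   netif_rx() or the pppd read queue)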
1351 */ 1352 1353/* misuse a few fields of the skb for MP reconstruction */ 1354#define sequence priority 1355#define BEbits cb[0] 1356 1357static inline void 1358ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 1359{ 1360 ppp_recv_lock(ppp); 1361 /* ppp->dev == 0 means interface is closing down */ 1362 if (ppp->dev != 0) 1363 ppp_receive_frame(ppp, skb, pch); 1364 else 1365 kfree_skb(skb); 1366 ppp_recv_unlock(ppp); 1367} 1368 1369void 1370ppp_input(struct ppp_channel *chan, struct sk_buff *skb) 1371{ 1372 struct channel *pch = chan->ppp; 1373 int proto; 1374 1375 if (pch == 0 || skb->len == 0) { 1376 kfree_skb(skb); 1377 return; 1378 } 1379 1380 proto = PPP_PROTO(skb); 1381 read_lock_bh(&pch->upl); 1382 if (pch->ppp == 0 || proto >= 0xc000 || proto == PPP_CCPFRAG) { 1383 /* put it on the channel queue */ 1384 skb_queue_tail(&pch->file.rq, skb); 1385 /* drop old frames if queue too long */ 1386 while (pch->file.rq.qlen > PPP_MAX_RQLEN 1387 && (skb = skb_dequeue(&pch->file.rq)) != 0) 1388 kfree_skb(skb); 1389 wake_up_interruptible(&pch->file.rwait); 1390 } else { 1391 ppp_do_recv(pch->ppp, skb, pch); 1392 } 1393 read_unlock_bh(&pch->upl); 1394} 1395 1396/* Put a 0-length skb in the receive queue as an error indication */ 1397void 1398ppp_input_error(struct ppp_channel *chan, int code) 1399{ 1400 struct channel *pch = chan->ppp; 1401 struct sk_buff *skb; 1402 1403 if (pch == 0) 1404 return; 1405 1406 read_lock_bh(&pch->upl); 1407 if (pch->ppp != 0) { 1408 skb = alloc_skb(0, GFP_ATOMIC); 1409 if (skb != 0) { 1410 skb->len = 0; /* probably unnecessary */ 1411 skb->cb[0] = code; 1412 ppp_do_recv(pch->ppp, skb, pch); 1413 } 1414 } 1415 read_unlock_bh(&pch->upl); 1416} 1417 1418/* 1419 * We come in here to process a received frame. 1420 * The receive side of the ppp unit is locked. 1421 */ 1422static void 1423ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 1424{ 1425 if (skb->len >= 2) { 1426#ifdef CONFIG_PPP_MULTILINK 1427 if (PPP_PROTO(skb) == PPP_MP) 1428 ppp_receive_mp_frame(ppp, skb, pch); 1429 else 1430#endif /* CONFIG_PPP_MULTILINK */ 1431 ppp_receive_nonmp_frame(ppp, skb); 1432 return; 1433 } 1434 1435 if (skb->len > 0) 1436 /* note: a 0-length skb is used as an error indication */ 1437 ++ppp->stats.rx_length_errors; 1438 1439 kfree_skb(skb); 1440 ppp_receive_error(ppp); 1441} 1442 1443static void 1444ppp_receive_error(struct ppp *ppp) 1445{ 1446 ++ppp->stats.rx_errors; 1447 if (ppp->vj != 0) 1448 slhc_toss(ppp->vj); 1449} 1450 1451static void 1452ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) 1453{ 1454 struct sk_buff *ns; 1455 int proto, len, npi; 1456 1457 /* 1458 * Decompress the frame, if compressed. 1459 * Note that some decompressors need to see uncompressed frames 1460 * that come in as well as compressed frames. 
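	 *
	 * After decompression the frame is dispatched by protocol
	 * number: known network protocols (PPP_IP, PPP_IPV6, ...) go up
	 * the stack via netif_rx(), provided the interface is up and the
	 * NP mode is NPMODE_PASS; everything else (LCP, authentication,
	 * unknown protocols) is queued on ppp->file.rq for pppd to read().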
1461 */ 1462 if (ppp->rc_state != 0 && (ppp->rstate & SC_DECOMP_RUN) 1463 && (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0) 1464 skb = ppp_decompress_frame(ppp, skb); 1465 1466 proto = PPP_PROTO(skb); 1467 switch (proto) { 1468 case PPP_VJC_COMP: 1469 /* decompress VJ compressed packets */ 1470 if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP)) 1471 goto err; 1472 if (skb_tailroom(skb) < 124) { 1473 /* copy to a new sk_buff with more tailroom */ 1474 ns = dev_alloc_skb(skb->len + 128); 1475 if (ns == 0) { 1476 printk(KERN_ERR"PPP: no memory (VJ decomp)\n"); 1477 goto err; 1478 } 1479 skb_reserve(ns, 2); 1480 memcpy(skb_put(ns, skb->len), skb->data, skb->len); 1481 kfree_skb(skb); 1482 skb = ns; 1483 } 1484 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); 1485 if (len <= 0) { 1486 printk(KERN_DEBUG "PPP: VJ decompression error\n"); 1487 goto err; 1488 } 1489 len += 2; 1490 if (len > skb->len) 1491 skb_put(skb, len - skb->len); 1492 else if (len < skb->len) 1493 skb_trim(skb, len); 1494 proto = PPP_IP; 1495 break; 1496 1497 case PPP_VJC_UNCOMP: 1498 if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP)) 1499 goto err; 1500 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { 1501 printk(KERN_ERR "PPP: VJ uncompressed error\n"); 1502 goto err; 1503 } 1504 proto = PPP_IP; 1505 break; 1506 1507 case PPP_CCP: 1508 ppp_ccp_peek(ppp, skb, 1); 1509 break; 1510 } 1511 1512 ++ppp->stats.rx_packets; 1513 ppp->stats.rx_bytes += skb->len - 2; 1514 1515 npi = proto_to_npindex(proto); 1516 if (npi < 0) { 1517 /* control or unknown frame - pass it to pppd */ 1518 skb_queue_tail(&ppp->file.rq, skb); 1519 /* limit queue length by dropping old frames */ 1520 while (ppp->file.rq.qlen > PPP_MAX_RQLEN 1521 && (skb = skb_dequeue(&ppp->file.rq)) != 0) 1522 kfree_skb(skb); 1523 /* wake up any process polling or blocking on read */ 1524 wake_up_interruptible(&ppp->file.rwait); 1525 1526 } else { 1527 /* network protocol frame - give it to the kernel */ 1528 1529#ifdef CONFIG_PPP_FILTER 1530 /* check if the packet passes the pass and active filters */ 1531 /* the filter instructions are constructed assuming 1532 a four-byte PPP header on each packet */ 1533 *skb_push(skb, 2) = 0; 1534 if (ppp->pass_filter.filter 1535 && sk_run_filter(skb, ppp->pass_filter.filter, 1536 ppp->pass_filter.len) == 0) { 1537 if (ppp->debug & 1) 1538 printk(KERN_DEBUG "PPP: inbound frame not passed\n"); 1539 kfree_skb(skb); 1540 return; 1541 } 1542 if (!(ppp->active_filter.filter 1543 && sk_run_filter(skb, ppp->active_filter.filter, 1544 ppp->active_filter.len) == 0)) 1545 ppp->last_recv = jiffies; 1546 skb_pull(skb, 2); 1547#else 1548 ppp->last_recv = jiffies; 1549#endif /* CONFIG_PPP_FILTER */ 1550 1551 if ((ppp->dev->flags & IFF_UP) == 0 1552 || ppp->npmode[npi] != NPMODE_PASS) { 1553 kfree_skb(skb); 1554 } else { 1555 skb_pull(skb, 2); /* chop off protocol */ 1556 skb->dev = ppp->dev; 1557 skb->protocol = htons(npindex_to_ethertype[npi]); 1558 skb->mac.raw = skb->data; 1559 netif_rx(skb); 1560 ppp->dev->last_rx = jiffies; 1561 } 1562 } 1563 return; 1564 1565 err: 1566 kfree_skb(skb); 1567 ppp_receive_error(ppp); 1568} 1569 1570static struct sk_buff * 1571ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) 1572{ 1573 int proto = PPP_PROTO(skb); 1574 struct sk_buff *ns; 1575 int len; 1576 1577 if (proto == PPP_COMP) { 1578 ns = dev_alloc_skb(ppp->mru_alloc + PPP_HDRLEN); 1579 if (ns == 0) { 1580 printk(KERN_ERR "ppp_decompress_frame: no memory\n"); 1581 goto err; 1582 } 1583 /* the decompressor still 
expects the A/C bytes in the hdr */ 1584 len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2, 1585 skb->len + 2, ns->data, 1586 ppp->mru_alloc + PPP_HDRLEN); 1587 if (len < 0) { 1588 /* Pass the compressed frame to pppd as an 1589 error indication. */ 1590 if (len == DECOMP_FATALERROR) 1591 ppp->rstate |= SC_DC_FERROR; 1592 kfree_skb(ns); 1593 goto err; 1594 } 1595 1596 kfree_skb(skb); 1597 skb = ns; 1598 skb_put(skb, len); 1599 skb_pull(skb, 2); /* pull off the A/C bytes */ 1600 1601 } else { 1602 /* Uncompressed frame - pass to decompressor so it 1603 can update its dictionary if necessary. */ 1604 if (ppp->rcomp->incomp) 1605 ppp->rcomp->incomp(ppp->rc_state, skb->data - 2, 1606 skb->len + 2); 1607 } 1608 1609 return skb; 1610 1611 err: 1612 if (ppp->rcomp->compress_proto != CI_MPPE 1613 && ppp->rcomp->compress_proto != CI_LZS) { 1614 /* If decompression protocol isn't MPPE/MPPC or LZS, we set 1615 SC_DC_ERROR flag and wait for CCP_RESETACK */ 1616 ppp->rstate |= SC_DC_ERROR; 1617 } 1618 ppp_receive_error(ppp); 1619 return skb; 1620} 1621 1622#ifdef CONFIG_PPP_MULTILINK 1623/* 1624 * Receive a multilink frame. 1625 * We put it on the reconstruction queue and then pull off 1626 * as many completed frames as we can. 1627 */ 1628static void 1629ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 1630{ 1631 u32 mask, seq; 1632 struct list_head *l; 1633 int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1634 1635 if (skb->len < mphdrlen + 1 || ppp->mrru == 0) 1636 goto err; /* no good, throw it away */ 1637 1638 /* Decode sequence number and begin/end bits */ 1639 if (ppp->flags & SC_MP_SHORTSEQ) { 1640 seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3]; 1641 mask = 0xfff; 1642 } else { 1643 seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5]; 1644 mask = 0xffffff; 1645 } 1646 skb->BEbits = skb->data[2]; 1647 skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */ 1648 1649 /* 1650 * Do protocol ID decompression on the first fragment of each packet. 1651 */ 1652 if ((skb->BEbits & B) && (skb->data[0] & 1)) 1653 *skb_push(skb, 1) = 0; 1654 1655 /* 1656 * Expand sequence number to 32 bits, making it as close 1657 * as possible to ppp->minseq. 1658 */ 1659 seq |= ppp->minseq & ~mask; 1660 if ((int)(ppp->minseq - seq) > (int)(mask >> 1)) 1661 seq += mask + 1; 1662 else if ((int)(seq - ppp->minseq) > (int)(mask >> 1)) 1663 seq -= mask + 1; /* should never happen */ 1664 skb->sequence = seq; 1665 pch->lastseq = seq; 1666 1667 /* 1668 * If this packet comes before the next one we were expecting, 1669 * drop it. 1670 */ 1671 if (seq_before(seq, ppp->nextseq)) { 1672 kfree_skb(skb); 1673 ++ppp->stats.rx_dropped; 1674 ppp_receive_error(ppp); 1675 return; 1676 } 1677 1678 /* 1679 * Reevaluate minseq, the minimum over all channels of the 1680 * last sequence number received on each channel. Because of 1681 * the increasing sequence number rule, we know that any fragment 1682 * before `minseq' which hasn't arrived is never going to arrive. 1683 * The list of channels can't change because we have the receive 1684 * side of the ppp unit locked. 
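	 *
	 * (seq_before()/seq_after() do serial-number arithmetic, so this
	 * works across 32-bit wrap-around: e.g. seq_before(0xfffffffe, 1)
	 * is true, because the u32 difference casts to a small negative
	 * s32.)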
1685 */ 1686 for (l = ppp->channels.next; l != &ppp->channels; l = l->next) { 1687 struct channel *ch = list_entry(l, struct channel, clist); 1688 if (seq_before(ch->lastseq, seq)) 1689 seq = ch->lastseq; 1690 } 1691 if (seq_before(ppp->minseq, seq)) 1692 ppp->minseq = seq; 1693 1694 /* Put the fragment on the reconstruction queue */ 1695 ppp_mp_insert(ppp, skb); 1696 1697 /* If the queue is getting long, don't wait any longer for packets 1698 before the start of the queue. */ 1699 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN 1700 && seq_before(ppp->minseq, ppp->mrq.next->sequence)) 1701 ppp->minseq = ppp->mrq.next->sequence; 1702 1703 /* Pull completed packets off the queue and receive them. */ 1704 while ((skb = ppp_mp_reconstruct(ppp)) != 0) 1705 ppp_receive_nonmp_frame(ppp, skb); 1706 1707 return; 1708 1709 err: 1710 kfree_skb(skb); 1711 ppp_receive_error(ppp); 1712} 1713 1714/* 1715 * Insert a fragment on the MP reconstruction queue. 1716 * The queue is ordered by increasing sequence number. 1717 */ 1718static void 1719ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb) 1720{ 1721 struct sk_buff *p; 1722 struct sk_buff_head *list = &ppp->mrq; 1723 u32 seq = skb->sequence; 1724 1725 /* N.B. we don't need to lock the list lock because we have the 1726 ppp unit receive-side lock. */ 1727 for (p = list->next; p != (struct sk_buff *)list; p = p->next) 1728 if (seq_before(seq, p->sequence)) 1729 break; 1730 __skb_insert(skb, p->prev, p, list); 1731} 1732 1733/* 1734 * Reconstruct a packet from the MP fragment queue. 1735 * We go through increasing sequence numbers until we find a 1736 * complete packet, or we get to the sequence number for a fragment 1737 * which hasn't arrived but might still do so. 1738 */ 1739struct sk_buff * 1740ppp_mp_reconstruct(struct ppp *ppp) 1741{ 1742 u32 seq = ppp->nextseq; 1743 u32 minseq = ppp->minseq; 1744 struct sk_buff_head *list = &ppp->mrq; 1745 struct sk_buff *p, *next; 1746 struct sk_buff *head, *tail; 1747 struct sk_buff *skb = NULL; 1748 int lost = 0, len = 0; 1749 1750 if (ppp->mrru == 0) /* do nothing until mrru is set */ 1751 return NULL; 1752 head = list->next; 1753 tail = NULL; 1754 for (p = head; p != (struct sk_buff *) list; p = next) { 1755 next = p->next; 1756 if (seq_before(p->sequence, seq)) { 1757 /* this can't happen, anyway ignore the skb */ 1758 printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n", 1759 p->sequence, seq); 1760 head = next; 1761 continue; 1762 } 1763 if (p->sequence != seq) { 1764 /* Fragment `seq' is missing. If it is after 1765 minseq, it might arrive later, so stop here. */ 1766 if (seq_after(seq, minseq)) 1767 break; 1768 /* Fragment `seq' is lost, keep going. */ 1769 lost = 1; 1770 seq = seq_before(minseq, p->sequence)? 1771 minseq + 1: p->sequence; 1772 next = p; 1773 continue; 1774 } 1775 1776 /* 1777 * At this point we know that all the fragments from 1778 * ppp->nextseq to seq are either present or lost. 1779 * Also, there are no complete packets in the queue 1780 * that have no missing fragments and end before this 1781 * fragment. 1782 */ 1783 1784 /* B bit set indicates this fragment starts a packet */ 1785 if (p->BEbits & B) { 1786 head = p; 1787 lost = 0; 1788 len = 0; 1789 } 1790 1791 len += p->len; 1792 1793 /* Got a complete packet yet? 
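		   (i.e. the head fragment has the B bit set, this
		   fragment has the E bit set, and no fragment in
		   between was marked lost)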
*/ 1794 if (lost == 0 && (p->BEbits & E) && (head->BEbits & B)) { 1795 if (len > ppp->mrru + 2) { 1796 ++ppp->stats.rx_length_errors; 1797 printk(KERN_DEBUG "PPP: reconstructed packet" 1798 " is too long (%d)\n", len); 1799 } else if (p == head) { 1800 /* fragment is complete packet - reuse skb */ 1801 tail = p; 1802 skb = skb_get(p); 1803 break; 1804 } else if ((skb = dev_alloc_skb(len)) == NULL) { 1805 ++ppp->stats.rx_missed_errors; 1806 printk(KERN_DEBUG "PPP: no memory for " 1807 "reconstructed packet"); 1808 } else { 1809 tail = p; 1810 break; 1811 } 1812 ppp->nextseq = seq + 1; 1813 } 1814 1815 /* 1816 * If this is the ending fragment of a packet, 1817 * and we haven't found a complete valid packet yet, 1818 * we can discard up to and including this fragment. 1819 */ 1820 if (p->BEbits & E) 1821 head = next; 1822 1823 ++seq; 1824 } 1825 1826 /* If we have a complete packet, copy it all into one skb. */ 1827 if (tail != NULL) { 1828 /* If we have discarded any fragments, 1829 signal a receive error. */ 1830 if (head->sequence != ppp->nextseq) { 1831 if (ppp->debug & 1) 1832 printk(KERN_DEBUG " missed pkts %u..%u\n", 1833 ppp->nextseq, head->sequence-1); 1834 ++ppp->stats.rx_dropped; 1835 ppp_receive_error(ppp); 1836 } 1837 1838 if (head != tail) 1839 /* copy to a single skb */ 1840 for (p = head; p != tail->next; p = p->next) 1841 memcpy(skb_put(skb, p->len), p->data, p->len); 1842 ppp->nextseq = tail->sequence + 1; 1843 head = tail->next; 1844 } 1845 1846 /* Discard all the skbuffs that we have copied the data out of 1847 or that we can't use. */ 1848 while ((p = list->next) != head) { 1849 __skb_unlink(p, list); 1850 kfree_skb(p); 1851 } 1852 1853 return skb; 1854} 1855#endif /* CONFIG_PPP_MULTILINK */ 1856 1857/* 1858 * Channel interface. 1859 */ 1860 1861/* 1862 * Create a new, unattached ppp channel. 1863 */ 1864int 1865ppp_register_channel(struct ppp_channel *chan) 1866{ 1867 struct channel *pch; 1868 1869 pch = kmalloc(sizeof(struct channel), GFP_KERNEL); 1870 if (pch == 0) 1871 return -ENOMEM; 1872 memset(pch, 0, sizeof(struct channel)); 1873 pch->ppp = NULL; 1874 pch->chan = chan; 1875 chan->ppp = pch; 1876 init_ppp_file(&pch->file, CHANNEL); 1877 pch->file.hdrlen = chan->hdrlen; 1878#ifdef CONFIG_PPP_MULTILINK 1879 pch->lastseq = -1; 1880#endif /* CONFIG_PPP_MULTILINK */ 1881 init_rwsem(&pch->chan_sem); 1882 spin_lock_init(&pch->downl); 1883 pch->upl = RW_LOCK_UNLOCKED; 1884 spin_lock_bh(&all_channels_lock); 1885 pch->file.index = ++last_channel_index; 1886 list_add(&pch->list, &new_channels); 1887 atomic_inc(&channel_count); 1888 spin_unlock_bh(&all_channels_lock); 1889 MOD_INC_USE_COUNT; 1890 return 0; 1891} 1892 1893/* 1894 * Return the index of a channel. 1895 */ 1896int ppp_channel_index(struct ppp_channel *chan) 1897{ 1898 struct channel *pch = chan->ppp; 1899 1900 if (pch != 0) 1901 return pch->file.index; 1902 return -1; 1903} 1904 1905/* 1906 * Return the PPP unit number to which a channel is connected. 1907 */ 1908int ppp_unit_number(struct ppp_channel *chan) 1909{ 1910 struct channel *pch = chan->ppp; 1911 int unit = -1; 1912 1913 if (pch != 0) { 1914 read_lock_bh(&pch->upl); 1915 if (pch->ppp != 0) 1916 unit = pch->ppp->file.index; 1917 read_unlock_bh(&pch->upl); 1918 } 1919 return unit; 1920} 1921 1922/* 1923 * Disconnect a channel from the generic layer. 1924 * This must be called in process context. 
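 *
 * A channel driver's teardown mirrors its setup; as a rough sketch
 * (the `mychan' structure and its ops table are hypothetical):
 *
 *	mychan->chan.ops    = &mychan_ppp_ops;
 *	mychan->chan.mtu    = 1500;
 *	mychan->chan.hdrlen = 0;
 *	ppp_register_channel(&mychan->chan);
 *	...
 *	ppp_unregister_channel(&mychan->chan);	(process context only)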
1925 */ 1926void 1927ppp_unregister_channel(struct ppp_channel *chan) 1928{ 1929 struct channel *pch = chan->ppp; 1930 1931 if (pch == 0) 1932 return; /* should never happen */ 1933 chan->ppp = 0; 1934 1935 /* 1936 * This ensures that we have returned from any calls into the 1937 * the channel's start_xmit or ioctl routine before we proceed. 1938 */ 1939 down_write(&pch->chan_sem); 1940 spin_lock_bh(&pch->downl); 1941 pch->chan = 0; 1942 spin_unlock_bh(&pch->downl); 1943 up_write(&pch->chan_sem); 1944 ppp_disconnect_channel(pch); 1945 spin_lock_bh(&all_channels_lock); 1946 list_del(&pch->list); 1947 spin_unlock_bh(&all_channels_lock); 1948 pch->file.dead = 1; 1949 wake_up_interruptible(&pch->file.rwait); 1950 if (atomic_dec_and_test(&pch->file.refcnt)) 1951 ppp_destroy_channel(pch); 1952 MOD_DEC_USE_COUNT; 1953} 1954 1955/* 1956 * Callback from a channel when it can accept more to transmit. 1957 * This should be called at BH/softirq level, not interrupt level. 1958 */ 1959void 1960ppp_output_wakeup(struct ppp_channel *chan) 1961{ 1962 struct channel *pch = chan->ppp; 1963 1964 if (pch == 0) 1965 return; 1966 ppp_channel_push(pch); 1967} 1968 1969/* 1970 * Compression control. 1971 */ 1972 1973/* Process the PPPIOCSCOMPRESS ioctl. */ 1974static int 1975ppp_set_compress(struct ppp *ppp, unsigned long arg) 1976{ 1977 int err; 1978 struct compressor *cp, *ocomp; 1979 struct ppp_option_data data; 1980 void *state, *ostate; 1981 unsigned char ccp_option[CCP_MAX_OPTION_LENGTH]; 1982#ifdef CONFIG_KMOD 1983 char modname[32]; 1984#endif 1985 1986 err = -EFAULT; 1987 if (copy_from_user(&data, (void *) arg, sizeof(data)) 1988 || (data.length <= CCP_MAX_OPTION_LENGTH 1989 && copy_from_user(ccp_option, data.ptr, data.length))) 1990 goto err1; 1991 err = -EINVAL; 1992 if (data.length > CCP_MAX_OPTION_LENGTH 1993 || ccp_option[1] < 2 || ccp_option[1] > data.length) 1994 goto err1; 1995 1996 cp = find_compressor(ccp_option[0]); 1997#ifdef CONFIG_KMOD 1998 if (cp == 0) { 1999 sprintf(modname, "ppp-compress-%d", ccp_option[0]); 2000 request_module(modname); 2001 cp = find_compressor(ccp_option[0]); 2002 } 2003#endif /* CONFIG_KMOD */ 2004 if (cp == 0) 2005 goto err1; 2006 2007 err = -ENOBUFS; 2008 if (data.transmit) { 2009 state = cp->comp_alloc(ccp_option, data.length); 2010 if (state != 0) { 2011 ppp_xmit_lock(ppp); 2012 ppp->xstate &= ~SC_COMP_RUN; 2013 ocomp = ppp->xcomp; 2014 ostate = ppp->xc_state; 2015 ppp->xcomp = cp; 2016 ppp->xc_state = state; 2017 ppp_xmit_unlock(ppp); 2018 if (ostate != 0) 2019 ocomp->comp_free(ostate); 2020 err = 0; 2021 } 2022 if (ccp_option[0] == CI_MPPE) 2023 /* 2024 * pppd (userland) has reduced the MTU by MPPE_PAD, 2025 * to accomodate "compressor" growth. We must 2026 * increase the space allocated for compressor 2027 * output in ppp_send_frame() accordingly. Note 2028 * that from a purist's view, it may be more correct 2029 * to require multilink and fragment large packets, 2030 * but that seems inefficient compared to this 2031 * little trick. 
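		 *
		 * The padding reserved this way is consumed in
		 * ppp_send_frame(), which adds ppp->xpad both to the
		 * size of the skb allocated for compressor output and
		 * to the output-size limit passed to the compressor.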
			 */
			ppp->xpad = MPPE_PAD;
		else
			ppp->xpad = 0;

	} else {
		state = cp->decomp_alloc(ccp_option, data.length);
		if (state != 0) {
			ppp_recv_lock(ppp);
			ppp->rstate &= ~SC_DECOMP_RUN;
			ocomp = ppp->rcomp;
			ostate = ppp->rc_state;
			ppp->rcomp = cp;
			ppp->rc_state = state;
			ppp_recv_unlock(ppp);
			if (ostate != 0)
				ocomp->decomp_free(ostate);
			err = 0;
		}
	}

 err1:
	return err;
}

/*
 * Look at a CCP packet and update our state accordingly.
 * We assume the caller has the xmit or recv path locked.
 */
static void
ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
{
	unsigned char *dp = skb->data + 2;
	int len;

	if (skb->len < CCP_HDRLEN + 2
	    || skb->len < (len = CCP_LENGTH(dp)) + 2)
		return;		/* too short */

	switch (CCP_CODE(dp)) {
	case CCP_CONFREQ:

		/* A ConfReq starts negotiation of compression
		 * in one direction of transmission,
		 * and hence brings it down...but which way?
		 *
		 * Remember:
		 * A ConfReq indicates what the sender would like to receive
		 */
		if (inbound)
			/* He is proposing what I should send */
			ppp->xstate &= ~SC_COMP_RUN;
		else
			/* I am proposing what he should send */
			ppp->rstate &= ~SC_DECOMP_RUN;

		break;

	case CCP_TERMREQ:
	case CCP_TERMACK:
		/*
		 * CCP is going down, both directions of transmission
		 */
		ppp->rstate &= ~SC_DECOMP_RUN;
		ppp->xstate &= ~SC_COMP_RUN;
		break;

	case CCP_CONFACK:
		if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
			break;
		dp += CCP_HDRLEN;
		len -= CCP_HDRLEN;
		if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
			break;
		if (inbound) {
			/* we will start receiving compressed packets */
			if (ppp->rc_state == 0)
				break;
			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
					ppp->file.index, 0, ppp->mru, ppp->debug)) {
				ppp->rstate |= SC_DECOMP_RUN;
				ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
			}
		} else {
			/* we will soon start sending compressed packets */
			if (ppp->xc_state == 0)
				break;
			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
					ppp->file.index, 0, ppp->debug))
				ppp->xstate |= SC_COMP_RUN;
		}
		break;

	case CCP_RESETACK:
		/* reset the [de]compressor */
		if ((ppp->flags & SC_CCP_UP) == 0)
			break;
		if (inbound) {
			if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
				ppp->rcomp->decomp_reset(ppp->rc_state);
				ppp->rstate &= ~SC_DC_ERROR;
			}
		} else {
			if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
				ppp->xcomp->comp_reset(ppp->xc_state);
		}
		break;
	}
}

/* Free up compression resources. */
static void
ppp_ccp_closed(struct ppp *ppp)
{
	void *xstate, *rstate;
	struct compressor *xcomp, *rcomp;

	ppp_lock(ppp);
	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
	ppp->xstate = 0;
	xcomp = ppp->xcomp;
	xstate = ppp->xc_state;
	ppp->xc_state = 0;
	ppp->rstate = 0;
	rcomp = ppp->rcomp;
	rstate = ppp->rc_state;
	ppp->rc_state = 0;
	ppp_unlock(ppp);

	if (xstate)
		xcomp->comp_free(xstate);
	if (rstate)
		rcomp->decomp_free(rstate);
}

/* List of compressors.
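 *
 * A compressor module registers itself with this list roughly as
 * sketched below (illustrative only; my_comp and its callbacks are
 * hypothetical, and only the struct compressor fields used in this
 * file are shown -- the actual compress/decompress handlers are
 * omitted).  Since find_compressor() falls back to
 * request_module("ppp-compress-<proto>"), the module should also be
 * loadable under that name:
 *
 *	static struct compressor my_comp = {
 *		.compress_proto	= CI_BSD_COMPRESS,	(from ppp-comp.h)
 *		.comp_alloc	= my_comp_alloc,
 *		.comp_init	= my_comp_init,
 *		.comp_free	= my_comp_free,
 *		.decomp_alloc	= my_decomp_alloc,
 *		.decomp_init	= my_decomp_init,
 *		.decomp_free	= my_decomp_free,
 *	};
 *
 *	static int __init my_comp_module_init(void)
 *	{
 *		return ppp_register_compressor(&my_comp);
 *	}
 *
 *	static void __exit my_comp_module_exit(void)
 *	{
 *		ppp_unregister_compressor(&my_comp);
 *	}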
 */
static LIST_HEAD(compressor_list);
static spinlock_t compressor_list_lock = SPIN_LOCK_UNLOCKED;

struct compressor_entry {
	struct list_head list;
	struct compressor *comp;
};

static struct compressor_entry *
find_comp_entry(int proto)
{
	struct compressor_entry *ce;
	struct list_head *list = &compressor_list;

	while ((list = list->next) != &compressor_list) {
		ce = list_entry(list, struct compressor_entry, list);
		if (ce->comp->compress_proto == proto)
			return ce;
	}
	return 0;
}

/* Register a compressor */
int
ppp_register_compressor(struct compressor *cp)
{
	struct compressor_entry *ce;
	int ret;
	spin_lock(&compressor_list_lock);
	ret = -EEXIST;
	if (find_comp_entry(cp->compress_proto) != 0)
		goto err1;
	ret = -ENOMEM;
	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
	if (ce == 0)
		goto err1;
	ret = 0;
	ce->comp = cp;
	list_add(&ce->list, &compressor_list);
 err1:
	spin_unlock(&compressor_list_lock);
	return ret;
}

/* Unregister a compressor */
void
ppp_unregister_compressor(struct compressor *cp)
{
	struct compressor_entry *ce;

	spin_lock(&compressor_list_lock);
	ce = find_comp_entry(cp->compress_proto);
	if (ce != 0 && ce->comp == cp) {
		list_del(&ce->list);
		kfree(ce);
	}
	spin_unlock(&compressor_list_lock);
}

/* Find a compressor. */
static struct compressor *
find_compressor(int type)
{
	struct compressor_entry *ce;
	struct compressor *cp = 0;

	spin_lock(&compressor_list_lock);
	ce = find_comp_entry(type);
	if (ce != 0)
		cp = ce->comp;
	spin_unlock(&compressor_list_lock);
	return cp;
}

/*
 * Miscellaneous stuff.
 */

static void
ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
{
	struct slcompress *vj = ppp->vj;

	memset(st, 0, sizeof(*st));
	st->p.ppp_ipackets = ppp->stats.rx_packets;
	st->p.ppp_ierrors = ppp->stats.rx_errors;
	st->p.ppp_ibytes = ppp->stats.rx_bytes;
	st->p.ppp_opackets = ppp->stats.tx_packets;
	st->p.ppp_oerrors = ppp->stats.tx_errors;
	st->p.ppp_obytes = ppp->stats.tx_bytes;
	if (vj == 0)
		return;
	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
	st->vj.vjs_compressed = vj->sls_o_compressed;
	st->vj.vjs_searches = vj->sls_o_searches;
	st->vj.vjs_misses = vj->sls_o_misses;
	st->vj.vjs_errorin = vj->sls_i_error;
	st->vj.vjs_tossed = vj->sls_i_tossed;
	st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
	st->vj.vjs_compressedin = vj->sls_i_compressed;
}

/*
 * Stuff for handling the lists of ppp units and channels
 * and for initialization.
 */

/*
 * Create a new ppp interface unit. Fails if it can't allocate memory
 * or if there is already a unit with the requested number.
 * unit == -1 means allocate a new number.
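 *
 * For context: this is normally reached from the PPPIOCNEWUNIT ioctl
 * on /dev/ppp, handled earlier in this file.  A minimal userland
 * sketch (illustrative only, error handling omitted):
 *
 *	int fd = open("/dev/ppp", O_RDWR);
 *	int unit = -1;			(-1: pick the lowest free number)
 *
 *	ioctl(fd, PPPIOCNEWUNIT, &unit);
 *	(on success, "unit" holds the number of the new pppN interface)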
2279 */ 2280static struct ppp * 2281ppp_create_interface(int unit, int *retp) 2282{ 2283 struct ppp *ppp; 2284 struct net_device *dev = NULL; 2285 int ret = -ENOMEM; 2286 int i; 2287 2288 ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL); 2289 if (ppp == 0) 2290 goto err; 2291 dev = kmalloc(sizeof(struct net_device), GFP_KERNEL); 2292 if (dev == 0) 2293 goto err; 2294 memset(ppp, 0, sizeof(struct ppp)); 2295 memset(dev, 0, sizeof(struct net_device)); 2296 2297 ret = -EEXIST; 2298 down(&all_ppp_sem); 2299 if (unit < 0) 2300 unit = cardmap_find_first_free(all_ppp_units); 2301 else if (cardmap_get(all_ppp_units, unit) != NULL) 2302 goto err_unlock; /* unit already exists */ 2303 2304 /* Initialize the new ppp unit */ 2305 ppp->file.index = unit; 2306 ppp->mru = PPP_MRU; 2307 ppp->mru_alloc = PPP_MRU; 2308 init_ppp_file(&ppp->file, INTERFACE); 2309 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ 2310 for (i = 0; i < NUM_NP; ++i) 2311 ppp->npmode[i] = NPMODE_PASS; 2312 INIT_LIST_HEAD(&ppp->channels); 2313 spin_lock_init(&ppp->rlock); 2314 spin_lock_init(&ppp->wlock); 2315#ifdef CONFIG_PPP_MULTILINK 2316 ppp->minseq = -1; 2317 skb_queue_head_init(&ppp->mrq); 2318#endif /* CONFIG_PPP_MULTILINK */ 2319 2320 ppp->dev = dev; 2321 dev->init = ppp_net_init; 2322 sprintf(dev->name, "ppp%d", unit); 2323 dev->priv = ppp; 2324 dev->features |= NETIF_F_DYNALLOC; 2325 2326 rtnl_lock(); 2327 ret = register_netdevice(dev); 2328 rtnl_unlock(); 2329 if (ret != 0) { 2330 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", 2331 dev->name, ret); 2332 goto err_unlock; 2333 } 2334 2335 atomic_inc(&ppp_unit_count); 2336 cardmap_set(&all_ppp_units, unit, ppp); 2337 up(&all_ppp_sem); 2338 *retp = 0; 2339 return ppp; 2340 2341 err_unlock: 2342 up(&all_ppp_sem); 2343 err: 2344 *retp = ret; 2345 if (ppp) 2346 kfree(ppp); 2347 if (dev) 2348 kfree(dev); 2349 return NULL; 2350} 2351 2352/* 2353 * Initialize a ppp_file structure. 2354 */ 2355static void 2356init_ppp_file(struct ppp_file *pf, int kind) 2357{ 2358 pf->kind = kind; 2359 skb_queue_head_init(&pf->xq); 2360 skb_queue_head_init(&pf->rq); 2361 atomic_set(&pf->refcnt, 1); 2362 init_waitqueue_head(&pf->rwait); 2363} 2364 2365/* 2366 * Take down a ppp interface unit - called when the owning file 2367 * (the one that created the unit) is closed or detached. 2368 */ 2369static void ppp_shutdown_interface(struct ppp *ppp) 2370{ 2371 struct net_device *dev; 2372 2373 down(&all_ppp_sem); 2374 ppp_lock(ppp); 2375 dev = ppp->dev; 2376 ppp->dev = 0; 2377 ppp_unlock(ppp); 2378 if (dev) { 2379 rtnl_lock(); 2380 dev_close(dev); 2381 unregister_netdevice(dev); 2382 rtnl_unlock(); 2383 } 2384 cardmap_set(&all_ppp_units, ppp->file.index, NULL); 2385 ppp->file.dead = 1; 2386 ppp->owner = NULL; 2387 wake_up_interruptible(&ppp->file.rwait); 2388 up(&all_ppp_sem); 2389} 2390 2391/* 2392 * Free the memory used by a ppp unit. This is only called once 2393 * there are no channels connected to the unit and no file structs 2394 * that reference the unit. 
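 *
 * Put differently (an informal summary of the call paths in this
 * file, as the author reads them): ppp_shutdown_interface() above
 * only marks the unit dead and unregisters the net device; the
 * memory itself is freed when the last holder drops its reference,
 * i.e.
 *
 *	if (atomic_dec_and_test(&ppp->file.refcnt))
 *		ppp_destroy_interface(ppp);
 *
 * as done from ppp_disconnect_channel() below and from the /dev/ppp
 * release path.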
2395 */ 2396static void ppp_destroy_interface(struct ppp *ppp) 2397{ 2398 atomic_dec(&ppp_unit_count); 2399 2400 if (!ppp->file.dead || ppp->n_channels) { 2401 /* "can't happen" */ 2402 printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d " 2403 "n_channels=%d !\n", ppp, ppp->file.dead, 2404 ppp->n_channels); 2405 return; 2406 } 2407 2408 ppp_ccp_closed(ppp); 2409 if (ppp->vj) { 2410 slhc_free(ppp->vj); 2411 ppp->vj = 0; 2412 } 2413 skb_queue_purge(&ppp->file.xq); 2414 skb_queue_purge(&ppp->file.rq); 2415#ifdef CONFIG_PPP_MULTILINK 2416 skb_queue_purge(&ppp->mrq); 2417#endif /* CONFIG_PPP_MULTILINK */ 2418#ifdef CONFIG_PPP_FILTER 2419 if (ppp->pass_filter.filter) { 2420 kfree(ppp->pass_filter.filter); 2421 ppp->pass_filter.filter = NULL; 2422 } 2423 if (ppp->active_filter.filter) { 2424 kfree(ppp->active_filter.filter); 2425 ppp->active_filter.filter = 0; 2426 } 2427#endif /* CONFIG_PPP_FILTER */ 2428 2429 kfree(ppp); 2430} 2431 2432/* 2433 * Locate an existing ppp unit. 2434 * The caller should have locked the all_ppp_sem. 2435 */ 2436static struct ppp * 2437ppp_find_unit(int unit) 2438{ 2439 return cardmap_get(all_ppp_units, unit); 2440} 2441 2442/* 2443 * Locate an existing ppp channel. 2444 * The caller should have locked the all_channels_lock. 2445 * First we look in the new_channels list, then in the 2446 * all_channels list. If found in the new_channels list, 2447 * we move it to the all_channels list. This is for speed 2448 * when we have a lot of channels in use. 2449 */ 2450static struct channel * 2451ppp_find_channel(int unit) 2452{ 2453 struct channel *pch; 2454 struct list_head *list; 2455 2456 list = &new_channels; 2457 while ((list = list->next) != &new_channels) { 2458 pch = list_entry(list, struct channel, list); 2459 if (pch->file.index == unit) { 2460 list_del(&pch->list); 2461 list_add(&pch->list, &all_channels); 2462 return pch; 2463 } 2464 } 2465 list = &all_channels; 2466 while ((list = list->next) != &all_channels) { 2467 pch = list_entry(list, struct channel, list); 2468 if (pch->file.index == unit) 2469 return pch; 2470 } 2471 return 0; 2472} 2473 2474/* 2475 * Connect a PPP channel to a PPP interface unit. 2476 */ 2477static int 2478ppp_connect_channel(struct channel *pch, int unit) 2479{ 2480 struct ppp *ppp; 2481 int ret = -ENXIO; 2482 int hdrlen; 2483 2484 down(&all_ppp_sem); 2485 ppp = ppp_find_unit(unit); 2486 if (ppp == 0) 2487 goto err1; 2488 2489 write_lock_bh(&pch->upl); 2490 ret = -EINVAL; 2491 if (pch->ppp != 0) 2492 goto err2; 2493 2494 ppp_lock(ppp); 2495 if (pch->file.hdrlen > ppp->file.hdrlen) 2496 ppp->file.hdrlen = pch->file.hdrlen; 2497 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ 2498 if (ppp->dev && hdrlen > ppp->dev->hard_header_len) 2499 ppp->dev->hard_header_len = hdrlen; 2500 list_add_tail(&pch->clist, &ppp->channels); 2501 ++ppp->n_channels; 2502 pch->ppp = ppp; 2503 atomic_inc(&ppp->file.refcnt); 2504 ppp_unlock(ppp); 2505 ret = 0; 2506 2507 err2: 2508 write_unlock_bh(&pch->upl); 2509 err1: 2510 up(&all_ppp_sem); 2511 return ret; 2512} 2513 2514/* 2515 * Disconnect a channel from its ppp unit. 
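 *
 * Connecting and disconnecting are normally driven from userland via
 * the PPPIOCCONNECT and PPPIOCDISCONN ioctls on a /dev/ppp descriptor
 * that has been attached to the channel (see the ioctl handling
 * earlier in this file).  An illustrative sketch, error handling
 * omitted:
 *
 *	ioctl(chan_fd, PPPIOCATTCHAN, &channel_index);
 *	ioctl(chan_fd, PPPIOCCONNECT, &unit_number);	(bind to unit)
 *	(traffic flows while connected)
 *	ioctl(chan_fd, PPPIOCDISCONN, 0);		(detach again)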
2516 */ 2517static int 2518ppp_disconnect_channel(struct channel *pch) 2519{ 2520 struct ppp *ppp; 2521 int err = -EINVAL; 2522 2523 write_lock_bh(&pch->upl); 2524 ppp = pch->ppp; 2525 pch->ppp = NULL; 2526 write_unlock_bh(&pch->upl); 2527 if (ppp != 0) { 2528 /* remove it from the ppp unit's list */ 2529 ppp_lock(ppp); 2530 list_del(&pch->clist); 2531 --ppp->n_channels; 2532 ppp_unlock(ppp); 2533 if (atomic_dec_and_test(&ppp->file.refcnt)) 2534 ppp_destroy_interface(ppp); 2535 err = 0; 2536 } 2537 return err; 2538} 2539 2540/* 2541 * Free up the resources used by a ppp channel. 2542 */ 2543static void ppp_destroy_channel(struct channel *pch) 2544{ 2545 atomic_dec(&channel_count); 2546 2547 if (!pch->file.dead) { 2548 /* "can't happen" */ 2549 printk(KERN_ERR "ppp: destroying undead channel %p !\n", 2550 pch); 2551 return; 2552 } 2553 skb_queue_purge(&pch->file.xq); 2554 skb_queue_purge(&pch->file.rq); 2555 kfree(pch); 2556} 2557 2558static void __exit ppp_cleanup(void) 2559{ 2560 /* should never happen */ 2561 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) 2562 printk(KERN_ERR "PPP: removing module but units remain!\n"); 2563 cardmap_destroy(&all_ppp_units); 2564 if (devfs_unregister_chrdev(PPP_MAJOR, "ppp") != 0) 2565 printk(KERN_ERR "PPP: failed to unregister PPP device\n"); 2566 devfs_unregister(devfs_handle); 2567} 2568 2569/* 2570 * Cardmap implementation. 2571 */ 2572static void *cardmap_get(struct cardmap *map, unsigned int nr) 2573{ 2574 struct cardmap *p; 2575 int i; 2576 2577 for (p = map; p != NULL; ) { 2578 if ((i = nr >> p->shift) >= CARDMAP_WIDTH) 2579 return NULL; 2580 if (p->shift == 0) 2581 return p->ptr[i]; 2582 nr &= ~(CARDMAP_MASK << p->shift); 2583 p = p->ptr[i]; 2584 } 2585 return NULL; 2586} 2587 2588static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) 2589{ 2590 struct cardmap *p; 2591 int i; 2592 2593 p = *pmap; 2594 if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) { 2595 do { 2596 /* need a new top level */ 2597 struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); 2598 memset(np, 0, sizeof(*np)); 2599 np->ptr[0] = p; 2600 if (p != NULL) { 2601 np->shift = p->shift + CARDMAP_ORDER; 2602 p->parent = np; 2603 } else 2604 np->shift = 0; 2605 p = np; 2606 } while ((nr >> p->shift) >= CARDMAP_WIDTH); 2607 *pmap = p; 2608 } 2609 while (p->shift > 0) { 2610 i = (nr >> p->shift) & CARDMAP_MASK; 2611 if (p->ptr[i] == NULL) { 2612 struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); 2613 memset(np, 0, sizeof(*np)); 2614 np->shift = p->shift - CARDMAP_ORDER; 2615 np->parent = p; 2616 p->ptr[i] = np; 2617 } 2618 if (ptr == NULL) 2619 clear_bit(i, &p->inuse); 2620 p = p->ptr[i]; 2621 } 2622 i = nr & CARDMAP_MASK; 2623 p->ptr[i] = ptr; 2624 if (ptr != NULL) 2625 set_bit(i, &p->inuse); 2626 else 2627 clear_bit(i, &p->inuse); 2628} 2629 2630static unsigned int cardmap_find_first_free(struct cardmap *map) 2631{ 2632 struct cardmap *p; 2633 unsigned int nr = 0; 2634 int i; 2635 2636 if ((p = map) == NULL) 2637 return 0; 2638 for (;;) { 2639 i = find_first_zero_bit(&p->inuse, CARDMAP_WIDTH); 2640 if (i >= CARDMAP_WIDTH) { 2641 if (p->parent == NULL) 2642 return CARDMAP_WIDTH << p->shift; 2643 p = p->parent; 2644 i = (nr >> p->shift) & CARDMAP_MASK; 2645 set_bit(i, &p->inuse); 2646 continue; 2647 } 2648 nr = (nr & (~CARDMAP_MASK << p->shift)) | (i << p->shift); 2649 if (p->shift == 0 || p->ptr[i] == NULL) 2650 return nr; 2651 p = p->ptr[i]; 2652 } 2653} 2654 2655static void cardmap_destroy(struct cardmap **pmap) 2656{ 2657 struct cardmap 
*p, *np; 2658 int i; 2659 2660 for (p = *pmap; p != NULL; p = np) { 2661 if (p->shift != 0) { 2662 for (i = 0; i < CARDMAP_WIDTH; ++i) 2663 if (p->ptr[i] != NULL) 2664 break; 2665 if (i < CARDMAP_WIDTH) { 2666 np = p->ptr[i]; 2667 p->ptr[i] = NULL; 2668 continue; 2669 } 2670 } 2671 np = p->parent; 2672 kfree(p); 2673 } 2674 *pmap = NULL; 2675} 2676 2677/* Module/initialization stuff */ 2678 2679module_init(ppp_init); 2680module_exit(ppp_cleanup); 2681 2682EXPORT_SYMBOL(ppp_register_channel); 2683EXPORT_SYMBOL(ppp_unregister_channel); 2684EXPORT_SYMBOL(ppp_channel_index); 2685EXPORT_SYMBOL(ppp_unit_number); 2686EXPORT_SYMBOL(ppp_input); 2687EXPORT_SYMBOL(ppp_input_error); 2688EXPORT_SYMBOL(ppp_output_wakeup); 2689EXPORT_SYMBOL(ppp_register_compressor); 2690EXPORT_SYMBOL(ppp_unregister_compressor); 2691EXPORT_SYMBOL(all_ppp_units); /* for debugging */ 2692EXPORT_SYMBOL(all_channels); /* for debugging */ 2693MODULE_LICENSE("GPL"); 2694
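
/*
 * Illustrative sketch (not part of this driver): how a channel driver
 * is expected to cooperate with ppp_output_wakeup(), assuming the
 * start_xmit convention described in include/linux/ppp_channel.h
 * (return 1 once the skb has been accepted, return 0 if the channel
 * cannot take it now and will call ppp_output_wakeup() later).  The
 * my_* names and struct my_link are hypothetical.
 */
#if 0
static int my_chan_start_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct my_link *link = chan->private;

	if (my_hw_queue_full(link))
		return 0;	/* generic layer keeps the skb and retries */
	my_hw_queue(link, skb);
	return 1;		/* skb accepted; it now belongs to us */
}

/* called from the device's transmit-complete softirq */
static void my_tx_done(struct my_link *link)
{
	if (my_hw_queue_has_room(link))
		ppp_output_wakeup(&link->chan);
}
#endif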