/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h.  Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20041108==
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>

#ifdef HNDCTF
#define TYPEDEF_INT32
#include <ctf/hndctf.h>
#endif

#define PPP_VERSION	"2.4.2"

/*
 * Network protocols we support.
 */
#define NP_IP	0		/* Internet Protocol V4 */
#define NP_IPV6	1		/* Internet Protocol V6 */
#define NP_IPX	2		/* IPX protocol */
#define NP_AT	3		/* Appletalk protocol */
#define NP_MPLS_UC 4		/* MPLS unicast */
#define NP_MPLS_MC 5		/* MPLS multicast */
#define NUM_NP	6		/* Number of NPs. */

#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
#define MIN_FRAG_SIZE	64

/*
 * An instance of /dev/ppp can be associated with either a ppp
 * interface unit or a ppp channel.  In both cases, file->private_data
 * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE = 1, CHANNEL
	} kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	atomic_t refcnt;		/* # refs (incl /dev/ppp attached) */
	int	hdrlen;			/* space to leave for headers */
	int	index;			/* interface unit / channel number */
	int	dead;			/* unit/channel has been shut down */
};

#define PF_TO_X(pf, X)		container_of(pf, X, file)

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)
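
/*
 * Illustrative sketch (not part of the driver): how the PF_TO_X macros
 * recover the enclosing object from a struct ppp_file pointer stored in
 * file->private_data.  The wrapper type `struct example_unit' and the
 * helper below are hypothetical and exist only to show the container_of()
 * relationship that PF_TO_PPP/PF_TO_CHANNEL rely on.
 */
#if 0
struct example_unit {
	int		other_state;	/* arbitrary fields before the file */
	struct ppp_file	file;		/* embedded, as in struct ppp/channel */
};

static struct example_unit *example_unit_from_file(struct ppp_file *pf)
{
	/* same idea as PF_TO_X(pf, struct example_unit) */
	return container_of(pf, struct example_unit, file);
}
#endif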

/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
 */
struct ppp {
	struct ppp_file	file;		/* stuff for read/write/poll 0 */
	struct file	*owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int		n_channels;	/* how many channels are attached 54 */
	spinlock_t	rlock;		/* lock for receive side 58 */
	spinlock_t	wlock;		/* lock for transmit side 5c */
	int		mru;		/* max receive unit 60 */
	unsigned int	flags;		/* control bits 64 */
	unsigned int	xstate;		/* transmit state bits 68 */
	unsigned int	rstate;		/* receive state bits 6c */
	int		debug;		/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void		*xc_state;	/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void		*rc_state;	/* its internal state 98 */
	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
#ifdef CONFIG_PPP_MULTILINK
	int		nxchan;		/* next channel to send something on */
	u32		nxseq;		/* next sequence number to send */
	int		mrru;		/* MP: max reconst. receive unit */
	u32		nextseq;	/* MP: seq no of next packet */
	u32		minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
	struct net_device_stats stats;	/* statistics */
#ifdef CONFIG_PPP_FILTER
	struct sock_filter *pass_filter;	/* filter for packets to pass */
	struct sock_filter *active_filter;	/* filter for pkts to reset idle */
	unsigned pass_len, active_len;
#endif /* CONFIG_PPP_FILTER */
};

/*
 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
 * SC_MUST_COMP
 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 * Bits in xstate: SC_COMP_RUN
 */
#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)

/*
 * Private data structure for each channel.
 * This includes the data structure used for multilink.
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
#endif /* CONFIG_PPP_MULTILINK */
};

/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you must take both locks
 * before modifying them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */
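
/*
 * Illustrative sketch (not part of the driver): acquiring locks in the
 * documented order when a channel and its attached unit must both be held.
 * The function name is hypothetical; the ordering (channel.upl -> ppp.wlock
 * -> ppp.rlock -> channel.downl) is the rule stated in the comment above.
 */
#if 0
static void example_lock_ordering(struct channel *pch)
{
	struct ppp *ppp;

	read_lock_bh(&pch->upl);		/* 1. channel.upl guards pch->ppp */
	ppp = pch->ppp;
	if (ppp != NULL) {
		spin_lock_bh(&ppp->wlock);	/* 2. transmit-side lock */
		spin_lock_bh(&ppp->rlock);	/* 3. receive-side lock */
		/* ... ppp->channels / ppp->n_channels may be walked here ... */
		spin_unlock_bh(&ppp->rlock);
		spin_unlock_bh(&ppp->wlock);
	}
	read_unlock_bh(&pch->upl);
	/* 4. channel.downl, if needed, is taken last (see ppp_push()). */
}
#endif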

/*
 * A cardmap represents a mapping from unsigned integers to pointers,
 * and provides a fast "find lowest unused number" operation.
 * It uses a broad (32-way) tree with a bitmap at each level.
 * It is designed to be space-efficient for small numbers of entries
 * and time-efficient for large numbers of entries.
 */
#define CARDMAP_ORDER	5
#define CARDMAP_WIDTH	(1U << CARDMAP_ORDER)
#define CARDMAP_MASK	(CARDMAP_WIDTH - 1)

struct cardmap {
	int shift;
	unsigned long inuse;
	struct cardmap *parent;
	void *ptr[CARDMAP_WIDTH];
};
static void *cardmap_get(struct cardmap *map, unsigned int nr);
static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
static unsigned int cardmap_find_first_free(struct cardmap *map);
static void cardmap_destroy(struct cardmap **map);

/*
 * all_ppp_mutex protects the all_ppp_units mapping.
 * It also ensures that finding a ppp unit in the all_ppp_units map
 * and updating its file.refcnt field is atomic.
 */
static DEFINE_MUTEX(all_ppp_mutex);
static struct cardmap *all_ppp_units;
static atomic_t ppp_unit_count = ATOMIC_INIT(0);

/*
 * all_channels_lock protects all_channels and last_channel_index,
 * and the atomicity of finding a channel and updating its file.refcnt
 * field.
 */
static DEFINE_SPINLOCK(all_channels_lock);
static LIST_HEAD(all_channels);
static LIST_HEAD(new_channels);
static int last_channel_index;
static atomic_t channel_count = ATOMIC_INIT(0);

/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb)	(((skb)->data[0] << 8) + (skb)->data[1])

/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN	32

/*
 * Maximum number of multilink fragments queued up.
 * This has to be large enough to cope with the maximum latency of
 * the slowest channel relative to the others.  Strictly it should
 * depend on the number of channels and their characteristics.
 */
#define PPP_MP_MAX_QLEN	128

/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */

/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)
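
/*
 * Illustrative sketch (not part of the driver): why the signed-difference
 * trick in seq_before()/seq_after() survives 32-bit wraparound.  The two
 * sequence numbers below are made up for the example.
 */
#if 0
static void example_seq_compare(void)
{
	u32 a = 0xfffffffe;	/* just before the 32-bit counter wraps */
	u32 b = 0x00000003;	/* just after the wrap */

	/* (s32)(a - b) == (s32)0xfffffffb == -5, so a compares as "before" b
	 * even though a > b as an unsigned value. */
	BUG_ON(!seq_before(a, b));
	BUG_ON(!seq_after(b, a));
}
#endif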

/* Prototypes. */
static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
				unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
			      struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
					    struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
				 struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static struct ppp *ppp_create_interface(int unit, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(int unit);
static struct channel *ppp_find_channel(int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);

/* Foxconn added start pling 03/28/2006 */
/**********************************************************************
* FUNCTION: computeTCPChecksum
* ARGUMENTS:
*  ipHdr -- pointer to IP header
*  tcpHdr -- pointer to TCP header
* RETURNS:
*  The computed TCP checksum
***********************************************************************/
#define UINT16 unsigned short
#define UINT32 unsigned long
static UINT16 computeTCPChecksum(unsigned char *ipHdr, unsigned char *tcpHdr)
{
	UINT32 sum = 0;
	UINT16 count = ipHdr[2] * 256 + ipHdr[3];
	unsigned char *addr = tcpHdr;
	unsigned char pseudoHeader[12];

	/* Count number of bytes in TCP header and data */
	count -= (ipHdr[0] & 0x0F) * 4;

	memcpy(pseudoHeader, ipHdr + 12, 8);
	pseudoHeader[8] = 0;
	pseudoHeader[9] = ipHdr[9];
	pseudoHeader[10] = (count >> 8) & 0xFF;
	pseudoHeader[11] = (count & 0xFF);

	/* Checksum the pseudo-header */
	sum += * (UINT16 *) pseudoHeader;
	sum += * ((UINT16 *) (pseudoHeader + 2));
	sum += * ((UINT16 *) (pseudoHeader + 4));
	sum += * ((UINT16 *) (pseudoHeader + 6));
	sum += * ((UINT16 *) (pseudoHeader + 8));
	sum += * ((UINT16 *) (pseudoHeader + 10));

	/* Checksum the TCP header and data */
	while (count > 1) {
		sum += * (UINT16 *) addr;
		addr += 2;
		count -= 2;
	}

	if (count > 0) {
		sum += *addr;
	}

	while (sum >> 16) {
		sum = (sum & 0xffff) + (sum >> 16);
	}

	return (UINT16) (~sum & 0xFFFF);
}
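
/*
 * Illustrative sketch (not part of the driver): the ones-complement fold
 * performed at the end of computeTCPChecksum(), on a made-up running sum.
 * Verification works by summing the pseudo-header, TCP header and data with
 * the checksum field left in place; computeTCPChecksum() returning 0 means
 * the packet's checksum is valid.
 */
#if 0
static void example_csum_fold(void)
{
	unsigned long sum = 0x2b3c1a;	/* made-up 32-bit running sum */

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	/* sum is now 0x3c45 (0x3c1a + 0x2b); the checksum would be its
	 * ones-complement, 0xc3ba. */
}
#endif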

/**********************************************************************
* FUNCTION: ppp_modify_tcp_mss
* ARGUMENTS:
*  payload  -- IP packet (preceded by the 2-byte PPP protocol field)
*  clampMss -- clamp value
* RETURNS:
*  Nothing
* DESCRIPTION:
*  Clamps MSS option if TCP SYN flag is set.
***********************************************************************/
void ppp_modify_tcp_mss(unsigned char *payload, int clampMss)
{
	unsigned char *tcpHdr;
	unsigned char *ipHdr;
	unsigned char *opt;
	unsigned char *endHdr;
	unsigned char *mssopt = NULL;
	UINT16 csum;

	/* Make sure this is an IP packet */
	if (payload[0] != 0x00 || payload[1] != 0x21) {
		return;
	}

	ipHdr = &payload[2];

	/* Verify once more that it's IPv4 */
	if ((ipHdr[0] & 0xF0) != 0x40) {
		return;
	}

	/* Is it a fragment that's not at the beginning of the packet? */
	if ((ipHdr[6] & 0x1F) || ipHdr[7]) {
		/* Yup, don't touch! */
		return;
	}

	/* Is it TCP? */
	if (ipHdr[9] != 0x06) {
		return;
	}

	/* Get start of TCP header */
	tcpHdr = ipHdr + (ipHdr[0] & 0x0F) * 4;

	/* Is SYN set? */
	if (!(tcpHdr[13] & 0x02)) {
		return;
	}

	/* Compute and verify TCP checksum -- do not touch a packet with a bad
	   checksum */
	csum = computeTCPChecksum(ipHdr, tcpHdr);
	if (csum) {
		/* printk("Bad TCP checksum %x", (unsigned int) csum); */

		/* Upper layers will drop it */
		return;
	}

	/* Look for existing MSS option */
	endHdr = tcpHdr + ((tcpHdr[12] & 0xF0) >> 2);
	opt = tcpHdr + 20;
	while (opt < endHdr) {
		if (!*opt)
			break;	/* End of options */

		switch (*opt) {
		case 1:
			opt++;
			break;

		case 2:
			if (opt[1] != 4) {
				/* Something fishy about MSS option length. */
				printk("Bogus length for MSS option (%u) from %u.%u.%u.%u",
				       (unsigned int) opt[1],
				       (unsigned int) ipHdr[12],
				       (unsigned int) ipHdr[13],
				       (unsigned int) ipHdr[14],
				       (unsigned int) ipHdr[15]);
				return;
			}
			mssopt = opt;
			break;

		default:
			if (opt[1] < 2) {
				/* Someone's trying to attack us? */
				printk("Bogus TCP option length (%u) from %u.%u.%u.%u",
				       (unsigned int) opt[1],
				       (unsigned int) ipHdr[12],
				       (unsigned int) ipHdr[13],
				       (unsigned int) ipHdr[14],
				       (unsigned int) ipHdr[15]);
				return;
			}
			opt += (opt[1]);
			break;
		}

		/* Found existing MSS option? */
		if (mssopt) {
			break;
		}
	}

	/* If MSS exists and it's low enough, do nothing */
	if (mssopt) {
		unsigned mss = mssopt[2] * 256 + mssopt[3];
		if (mss <= clampMss) {
			return;
		}

		mssopt[2] = (((unsigned) clampMss) >> 8) & 0xFF;
		mssopt[3] = ((unsigned) clampMss) & 0xFF;

		/* printk("%s: Modified MSS value to %d\n", __FUNCTION__, clampMss); */
	} else {
		/* No MSS option.  Don't add one; we'll have to use 536. */
		return;
	}

	/* Recompute TCP checksum */
	tcpHdr[16] = 0;
	tcpHdr[17] = 0;
	csum = computeTCPChecksum(ipHdr, tcpHdr);
	(* (UINT16 *) (tcpHdr + 16)) = csum;
}
#undef UINT16
#undef UINT32
/* Foxconn added end pling 03/28/2006 */
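
/*
 * Illustrative sketch (not part of the driver): how a caller would hand an
 * outgoing frame to ppp_modify_tcp_mss().  The buffer layout assumed is the
 * one checked above: a 2-byte PPP protocol field (0x00 0x21 for IPv4)
 * followed immediately by the IP header.  The helper name and clamp value
 * are made up; 1452 is a typical MSS for a 1492-byte PPPoE MTU (40 bytes of
 * IP + TCP headers subtracted).
 */
#if 0
static void example_clamp_mss(struct sk_buff *skb)
{
	/* skb->data[0..1] hold the PPP protocol, skb->data[2..] the IP header */
	if (skb->len > 2)
		ppp_modify_tcp_mss(skb->data, 1452);
}
#endif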

static struct class *ppp_class;

/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
	switch (proto) {
	case PPP_IP:
		return NP_IP;
	case PPP_IPV6:
		return NP_IPV6;
	case PPP_IPX:
		return NP_IPX;
	case PPP_AT:
		return NP_AT;
	case PPP_MPLS_UC:
		return NP_MPLS_UC;
	case PPP_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -EINVAL;
}

/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,
	PPP_IPV6,
	PPP_IPX,
	PPP_AT,
	PPP_MPLS_UC,
	PPP_MPLS_MC,
};

/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:
		return NP_IP;
	case ETH_P_IPV6:
		return NP_IPV6;
	case ETH_P_IPX:
		return NP_IPX;
	case ETH_P_PPPTALK:
	case ETH_P_ATALK:
		return NP_AT;
	case ETH_P_MPLS_UC:
		return NP_MPLS_UC;
	case ETH_P_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -1;
}

/* Translates an NP index into an ethertype */
static const int npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,
	ETH_P_IPV6,
	ETH_P_IPX,
	ETH_P_PPPTALK,
	ETH_P_MPLS_UC,
	ETH_P_MPLS_MC,
};

/*
 * Locking shorthand.
 */
#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
				     ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
				     ppp_xmit_unlock(ppp); } while (0)

/*
 * /dev/ppp device routines.
 * The /dev/ppp device is used by pppd to control the ppp unit.
 * It supports the read, write, ioctl and poll functions.
 * Open instances of /dev/ppp can be in one of three states:
 * unattached, attached to a ppp unit, or attached to a ppp channel.
 */
static int ppp_open(struct inode *inode, struct file *file)
{
	/*
	 * This could (should?) be enforced by the permissions on /dev/ppp.
550 */ 551 if (!capable(CAP_NET_ADMIN)) 552 return -EPERM; 553 return 0; 554} 555 556static int ppp_release(struct inode *inode, struct file *file) 557{ 558 struct ppp_file *pf = file->private_data; 559 struct ppp *ppp; 560 561 if (pf != 0) { 562 file->private_data = NULL; 563 if (pf->kind == INTERFACE) { 564 ppp = PF_TO_PPP(pf); 565 if (file == ppp->owner) 566 ppp_shutdown_interface(ppp); 567 } 568 if (atomic_dec_and_test(&pf->refcnt)) { 569 switch (pf->kind) { 570 case INTERFACE: 571 ppp_destroy_interface(PF_TO_PPP(pf)); 572 break; 573 case CHANNEL: 574 ppp_destroy_channel(PF_TO_CHANNEL(pf)); 575 break; 576 } 577 } 578 } 579 return 0; 580} 581 582static ssize_t ppp_read(struct file *file, char __user *buf, 583 size_t count, loff_t *ppos) 584{ 585 struct ppp_file *pf = file->private_data; 586 DECLARE_WAITQUEUE(wait, current); 587 ssize_t ret; 588 struct sk_buff *skb = NULL; 589 590 ret = count; 591 592 if (pf == 0) 593 return -ENXIO; 594 add_wait_queue(&pf->rwait, &wait); 595 for (;;) { 596 set_current_state(TASK_INTERRUPTIBLE); 597 skb = skb_dequeue(&pf->rq); 598 if (skb) 599 break; 600 ret = 0; 601 if (pf->dead) 602 break; 603 if (pf->kind == INTERFACE) { 604 /* 605 * Return 0 (EOF) on an interface that has no 606 * channels connected, unless it is looping 607 * network traffic (demand mode). 608 */ 609 struct ppp *ppp = PF_TO_PPP(pf); 610 if (ppp->n_channels == 0 611 && (ppp->flags & SC_LOOP_TRAFFIC) == 0) 612 break; 613 } 614 ret = -EAGAIN; 615 if (file->f_flags & O_NONBLOCK) 616 break; 617 ret = -ERESTARTSYS; 618 if (signal_pending(current)) 619 break; 620 schedule(); 621 } 622 set_current_state(TASK_RUNNING); 623 remove_wait_queue(&pf->rwait, &wait); 624 625 if (skb == 0) 626 goto out; 627 628 ret = -EOVERFLOW; 629 if (skb->len > count) 630 goto outf; 631 ret = -EFAULT; 632 if (copy_to_user(buf, skb->data, skb->len)) 633 goto outf; 634 ret = skb->len; 635 636 outf: 637 kfree_skb(skb); 638 out: 639 return ret; 640} 641 642static ssize_t ppp_write(struct file *file, const char __user *buf, 643 size_t count, loff_t *ppos) 644{ 645 struct ppp_file *pf = file->private_data; 646 struct sk_buff *skb; 647 ssize_t ret; 648 649 if (pf == 0) 650 return -ENXIO; 651 ret = -ENOMEM; 652 skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); 653 if (skb == 0) 654 goto out; 655 skb_reserve(skb, pf->hdrlen); 656 ret = -EFAULT; 657 if (copy_from_user(skb_put(skb, count), buf, count)) { 658 kfree_skb(skb); 659 goto out; 660 } 661 662 skb_queue_tail(&pf->xq, skb); 663 664 switch (pf->kind) { 665 case INTERFACE: 666 ppp_xmit_process(PF_TO_PPP(pf)); 667 break; 668 case CHANNEL: 669 ppp_channel_push(PF_TO_CHANNEL(pf)); 670 break; 671 } 672 673 ret = count; 674 675 out: 676 return ret; 677} 678 679/* No kernel lock - fine */ 680static unsigned int ppp_poll(struct file *file, poll_table *wait) 681{ 682 struct ppp_file *pf = file->private_data; 683 unsigned int mask; 684 685 if (pf == 0) 686 return 0; 687 poll_wait(file, &pf->rwait, wait); 688 mask = POLLOUT | POLLWRNORM; 689 if (skb_peek(&pf->rq) != 0) 690 mask |= POLLIN | POLLRDNORM; 691 if (pf->dead) 692 mask |= POLLHUP; 693 else if (pf->kind == INTERFACE) { 694 /* see comment in ppp_read */ 695 struct ppp *ppp = PF_TO_PPP(pf); 696 if (ppp->n_channels == 0 697 && (ppp->flags & SC_LOOP_TRAFFIC) == 0) 698 mask |= POLLIN | POLLRDNORM; 699 } 700 701 return mask; 702} 703 704#ifdef CONFIG_PPP_FILTER 705static int get_filter(void __user *arg, struct sock_filter **p) 706{ 707 struct sock_fprog uprog; 708 struct sock_filter *code = NULL; 709 int len, err; 710 711 if 
(copy_from_user(&uprog, arg, sizeof(uprog))) 712 return -EFAULT; 713 714 if (!uprog.len) { 715 *p = NULL; 716 return 0; 717 } 718 719 len = uprog.len * sizeof(struct sock_filter); 720 code = kmalloc(len, GFP_KERNEL); 721 if (code == NULL) 722 return -ENOMEM; 723 724 if (copy_from_user(code, uprog.filter, len)) { 725 kfree(code); 726 return -EFAULT; 727 } 728 729 err = sk_chk_filter(code, uprog.len); 730 if (err) { 731 kfree(code); 732 return err; 733 } 734 735 *p = code; 736 return uprog.len; 737} 738#endif /* CONFIG_PPP_FILTER */ 739 740static int ppp_ioctl(struct inode *inode, struct file *file, 741 unsigned int cmd, unsigned long arg) 742{ 743 struct ppp_file *pf = file->private_data; 744 struct ppp *ppp; 745 int err = -EFAULT, val, val2, i; 746 struct ppp_idle idle; 747 struct npioctl npi; 748 int unit, cflags; 749 struct slcompress *vj; 750 void __user *argp = (void __user *)arg; 751 int __user *p = argp; 752 753 if (pf == 0) 754 return ppp_unattached_ioctl(pf, file, cmd, arg); 755 756 if (cmd == PPPIOCDETACH) { 757 /* 758 * We have to be careful here... if the file descriptor 759 * has been dup'd, we could have another process in the 760 * middle of a poll using the same file *, so we had 761 * better not free the interface data structures - 762 * instead we fail the ioctl. Even in this case, we 763 * shut down the interface if we are the owner of it. 764 * Actually, we should get rid of PPPIOCDETACH, userland 765 * (i.e. pppd) could achieve the same effect by closing 766 * this fd and reopening /dev/ppp. 767 */ 768 err = -EINVAL; 769 if (pf->kind == INTERFACE) { 770 ppp = PF_TO_PPP(pf); 771 if (file == ppp->owner) 772 ppp_shutdown_interface(ppp); 773 } 774 if (atomic_read(&file->f_count) <= 2) { 775 ppp_release(inode, file); 776 err = 0; 777 } else 778 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n", 779 atomic_read(&file->f_count)); 780 return err; 781 } 782 783 if (pf->kind == CHANNEL) { 784 struct channel *pch = PF_TO_CHANNEL(pf); 785 struct ppp_channel *chan; 786 787 switch (cmd) { 788 case PPPIOCCONNECT: 789 if (get_user(unit, p)) 790 break; 791 err = ppp_connect_channel(pch, unit); 792 break; 793 794 case PPPIOCDISCONN: 795 err = ppp_disconnect_channel(pch); 796 break; 797 798 default: 799 down_read(&pch->chan_sem); 800 chan = pch->chan; 801 err = -ENOTTY; 802 if (chan && chan->ops->ioctl) 803 err = chan->ops->ioctl(chan, cmd, arg); 804 up_read(&pch->chan_sem); 805 } 806 return err; 807 } 808 809 if (pf->kind != INTERFACE) { 810 /* can't happen */ 811 printk(KERN_ERR "PPP: not interface or channel??\n"); 812 return -EINVAL; 813 } 814 815 ppp = PF_TO_PPP(pf); 816 switch (cmd) { 817 case PPPIOCSMRU: 818 if (get_user(val, p)) 819 break; 820 ppp->mru = val; 821 err = 0; 822 break; 823 824 case PPPIOCSFLAGS: 825 if (get_user(val, p)) 826 break; 827 ppp_lock(ppp); 828 cflags = ppp->flags & ~val; 829 ppp->flags = val & SC_FLAG_BITS; 830 ppp_unlock(ppp); 831 if (cflags & SC_CCP_OPEN) 832 ppp_ccp_closed(ppp); 833 err = 0; 834 break; 835 836 case PPPIOCGFLAGS: 837 val = ppp->flags | ppp->xstate | ppp->rstate; 838 if (put_user(val, p)) 839 break; 840 err = 0; 841 break; 842 843 case PPPIOCSCOMPRESS: 844 err = ppp_set_compress(ppp, arg); 845 break; 846 847 case PPPIOCGUNIT: 848 if (put_user(ppp->file.index, p)) 849 break; 850 err = 0; 851 break; 852 853 case PPPIOCSDEBUG: 854 if (get_user(val, p)) 855 break; 856 ppp->debug = val; 857 err = 0; 858 break; 859 860 case PPPIOCGDEBUG: 861 if (put_user(ppp->debug, p)) 862 break; 863 err = 0; 864 break; 865 866 case PPPIOCGIDLE: 867 
/*foxconn modified start, water, 11/27/09, @pppoe/pptp idle time not correct issue*/ 868 if (jiffies >= ppp->last_xmit) /* wklin modified from > to >=*/ 869 idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ; 870 else 871 idle.xmit_idle = (0xFFFFFFFF - ppp->last_xmit + jiffies) / HZ; 872 idle.recv_idle = idle.xmit_idle; 873 /* 874 idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ; 875 idle.recv_idle = (jiffies - ppp->last_recv) / HZ; 876 */ 877 /*foxconn modified end, water, 11/27/09*/ 878 if (copy_to_user(argp, &idle, sizeof(idle))) 879 break; 880 err = 0; 881 break; 882 883 case PPPIOCSMAXCID: 884 if (get_user(val, p)) 885 break; 886 val2 = 15; 887 if ((val >> 16) != 0) { 888 val2 = val >> 16; 889 val &= 0xffff; 890 } 891 vj = slhc_init(val2+1, val+1); 892 if (vj == 0) { 893 printk(KERN_ERR "PPP: no memory (VJ compressor)\n"); 894 err = -ENOMEM; 895 break; 896 } 897 ppp_lock(ppp); 898 if (ppp->vj != 0) 899 slhc_free(ppp->vj); 900 ppp->vj = vj; 901 ppp_unlock(ppp); 902 err = 0; 903 break; 904 905 case PPPIOCGNPMODE: 906 case PPPIOCSNPMODE: 907 if (copy_from_user(&npi, argp, sizeof(npi))) 908 break; 909 err = proto_to_npindex(npi.protocol); 910 if (err < 0) 911 break; 912 i = err; 913 if (cmd == PPPIOCGNPMODE) { 914 err = -EFAULT; 915 npi.mode = ppp->npmode[i]; 916 if (copy_to_user(argp, &npi, sizeof(npi))) 917 break; 918 } else { 919 ppp->npmode[i] = npi.mode; 920 /* we may be able to transmit more packets now (??) */ 921 netif_wake_queue(ppp->dev); 922 } 923 err = 0; 924 break; 925 926#ifdef CONFIG_PPP_FILTER 927 case PPPIOCSPASS: 928 { 929 struct sock_filter *code; 930 err = get_filter(argp, &code); 931 if (err >= 0) { 932 ppp_lock(ppp); 933 kfree(ppp->pass_filter); 934 ppp->pass_filter = code; 935 ppp->pass_len = err; 936 ppp_unlock(ppp); 937 err = 0; 938 } 939 break; 940 } 941 case PPPIOCSACTIVE: 942 { 943 struct sock_filter *code; 944 err = get_filter(argp, &code); 945 if (err >= 0) { 946 ppp_lock(ppp); 947 kfree(ppp->active_filter); 948 ppp->active_filter = code; 949 ppp->active_len = err; 950 ppp_unlock(ppp); 951 err = 0; 952 } 953 break; 954 } 955#endif /* CONFIG_PPP_FILTER */ 956 957#ifdef CONFIG_PPP_MULTILINK 958 case PPPIOCSMRRU: 959 if (get_user(val, p)) 960 break; 961 ppp_recv_lock(ppp); 962 ppp->mrru = val; 963 ppp_recv_unlock(ppp); 964 err = 0; 965 break; 966#endif /* CONFIG_PPP_MULTILINK */ 967 968 default: 969 err = -ENOTTY; 970 } 971 972 return err; 973} 974 975static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, 976 unsigned int cmd, unsigned long arg) 977{ 978 int unit, err = -EFAULT; 979 struct ppp *ppp; 980 struct channel *chan; 981 int __user *p = (int __user *)arg; 982 983 switch (cmd) { 984 case PPPIOCNEWUNIT: 985 /* Create a new ppp unit */ 986 if (get_user(unit, p)) 987 break; 988 ppp = ppp_create_interface(unit, &err); 989 if (ppp == 0) 990 break; 991 file->private_data = &ppp->file; 992 ppp->owner = file; 993 err = -EFAULT; 994 if (put_user(ppp->file.index, p)) 995 break; 996 err = 0; 997 break; 998 999 case PPPIOCATTACH: 1000 /* Attach to an existing ppp unit */ 1001 if (get_user(unit, p)) 1002 break; 1003 mutex_lock(&all_ppp_mutex); 1004 err = -ENXIO; 1005 ppp = ppp_find_unit(unit); 1006 if (ppp != 0) { 1007 atomic_inc(&ppp->file.refcnt); 1008 file->private_data = &ppp->file; 1009 err = 0; 1010 } 1011 mutex_unlock(&all_ppp_mutex); 1012 break; 1013 1014 case PPPIOCATTCHAN: 1015 if (get_user(unit, p)) 1016 break; 1017 spin_lock_bh(&all_channels_lock); 1018 err = -ENXIO; 1019 chan = ppp_find_channel(unit); 1020 if (chan != 0) { 1021 
atomic_inc(&chan->file.refcnt); 1022 file->private_data = &chan->file; 1023 err = 0; 1024 } 1025 spin_unlock_bh(&all_channels_lock); 1026 break; 1027 1028 default: 1029 err = -ENOTTY; 1030 } 1031 return err; 1032} 1033 1034static const struct file_operations ppp_device_fops = { 1035 .owner = THIS_MODULE, 1036 .read = ppp_read, 1037 .write = ppp_write, 1038 .poll = ppp_poll, 1039 .ioctl = ppp_ioctl, 1040 .open = ppp_open, 1041 .release = ppp_release 1042}; 1043 1044#define PPP_MAJOR 108 1045 1046/* Called at boot time if ppp is compiled into the kernel, 1047 or at module load time (from init_module) if compiled as a module. */ 1048static int __init ppp_init(void) 1049{ 1050 int err; 1051 1052 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); 1053 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); 1054 if (!err) { 1055 ppp_class = class_create(THIS_MODULE, "ppp"); 1056 if (IS_ERR(ppp_class)) { 1057 err = PTR_ERR(ppp_class); 1058 goto out_chrdev; 1059 } 1060 device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), "ppp"); 1061 } 1062 1063out: 1064 if (err) 1065 printk(KERN_ERR "failed to register PPP device (%d)\n", err); 1066 1067 return err; 1068 1069out_chrdev: 1070 unregister_chrdev(PPP_MAJOR, "ppp"); 1071 goto out; 1072} 1073 1074/* 1075 * Network interface unit routines. 1076 */ 1077static int 1078ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) 1079{ 1080 struct ppp *ppp = (struct ppp *) dev->priv; 1081 int npi, proto; 1082 unsigned char *pp; 1083 1084 npi = ethertype_to_npindex(ntohs(skb->protocol)); 1085 if (npi < 0) 1086 goto outf; 1087 1088 /* Drop, accept or reject the packet */ 1089 switch (ppp->npmode[npi]) { 1090 case NPMODE_PASS: 1091 break; 1092 case NPMODE_QUEUE: 1093 /* it would be nice to have a way to tell the network 1094 system to queue this one up for later. */ 1095 goto outf; 1096 case NPMODE_DROP: 1097 case NPMODE_ERROR: 1098 goto outf; 1099 } 1100 1101 /* Put the 2-byte PPP protocol number on the front, 1102 making sure there is room for the address and control fields. 
*/ 1103 if (skb_headroom(skb) < PPP_HDRLEN) { 1104 struct sk_buff *ns; 1105 1106 ns = alloc_skb(skb->len + dev->hard_header_len, GFP_ATOMIC); 1107 if (ns == 0) 1108 goto outf; 1109 skb_reserve(ns, dev->hard_header_len); 1110 skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len); 1111 kfree_skb(skb); 1112 skb = ns; 1113 } 1114 pp = skb_push(skb, 2); 1115 proto = npindex_to_proto[npi]; 1116 pp[0] = proto >> 8; 1117 pp[1] = proto; 1118 1119 netif_stop_queue(dev); 1120 skb_queue_tail(&ppp->file.xq, skb); 1121 ppp_xmit_process(ppp); 1122 return 0; 1123 1124 outf: 1125 kfree_skb(skb); 1126 ++ppp->stats.tx_dropped; 1127 return 0; 1128} 1129 1130static struct net_device_stats * 1131ppp_net_stats(struct net_device *dev) 1132{ 1133 struct ppp *ppp = (struct ppp *) dev->priv; 1134 1135 return &ppp->stats; 1136} 1137 1138static int 1139ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1140{ 1141 struct ppp *ppp = dev->priv; 1142 int err = -EFAULT; 1143 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; 1144 struct ppp_stats stats; 1145 struct ppp_comp_stats cstats; 1146 char *vers; 1147 1148 switch (cmd) { 1149 case SIOCGPPPSTATS: 1150 ppp_get_stats(ppp, &stats); 1151 if (copy_to_user(addr, &stats, sizeof(stats))) 1152 break; 1153 err = 0; 1154 break; 1155 1156 case SIOCGPPPCSTATS: 1157 memset(&cstats, 0, sizeof(cstats)); 1158 if (ppp->xc_state != 0) 1159 ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c); 1160 if (ppp->rc_state != 0) 1161 ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d); 1162 if (copy_to_user(addr, &cstats, sizeof(cstats))) 1163 break; 1164 err = 0; 1165 break; 1166 1167 case SIOCGPPPVER: 1168 vers = PPP_VERSION; 1169 if (copy_to_user(addr, vers, strlen(vers) + 1)) 1170 break; 1171 err = 0; 1172 break; 1173 1174 default: 1175 err = -EINVAL; 1176 } 1177 1178 return err; 1179} 1180 1181static void ppp_setup(struct net_device *dev) 1182{ 1183 dev->hard_header_len = PPP_HDRLEN; 1184 dev->mtu = PPP_MTU; 1185 dev->addr_len = 0; 1186 dev->tx_queue_len = 3; 1187 dev->type = ARPHRD_PPP; 1188 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1189} 1190 1191/* 1192 * Transmit-side routines. 1193 */ 1194 1195/* 1196 * Called to do any work queued up on the transmit side 1197 * that can now be done. 1198 */ 1199static void 1200ppp_xmit_process(struct ppp *ppp) 1201{ 1202 struct sk_buff *skb; 1203 1204 ppp_xmit_lock(ppp); 1205 if (ppp->dev != 0) { 1206 ppp_push(ppp); 1207 while (ppp->xmit_pending == 0 1208 && (skb = skb_dequeue(&ppp->file.xq)) != 0) 1209 ppp_send_frame(ppp, skb); 1210 /* If there's no work left to do, tell the core net 1211 code that we can accept some more. 
*/ 1212 if (ppp->xmit_pending == 0 && skb_peek(&ppp->file.xq) == 0) 1213 netif_wake_queue(ppp->dev); 1214 } 1215 ppp_xmit_unlock(ppp); 1216} 1217 1218static inline struct sk_buff * 1219pad_compress_skb(struct ppp *ppp, struct sk_buff *skb) 1220{ 1221 struct sk_buff *new_skb; 1222 int len; 1223 int new_skb_size = ppp->dev->mtu + 1224 ppp->xcomp->comp_extra + ppp->dev->hard_header_len; 1225 int compressor_skb_size = ppp->dev->mtu + 1226 ppp->xcomp->comp_extra + PPP_HDRLEN; 1227 new_skb = alloc_skb(new_skb_size, GFP_ATOMIC); 1228 if (!new_skb) { 1229 if (net_ratelimit()) 1230 printk(KERN_ERR "PPP: no memory (comp pkt)\n"); 1231 return NULL; 1232 } 1233 if (ppp->dev->hard_header_len > PPP_HDRLEN) 1234 skb_reserve(new_skb, 1235 ppp->dev->hard_header_len - PPP_HDRLEN); 1236 1237 /* compressor still expects A/C bytes in hdr */ 1238 len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2, 1239 new_skb->data, skb->len + 2, 1240 compressor_skb_size); 1241 if (len > 0 && (ppp->flags & SC_CCP_UP)) { 1242 kfree_skb(skb); 1243 skb = new_skb; 1244 skb_put(skb, len); 1245 skb_pull(skb, 2); /* pull off A/C bytes */ 1246 } else if (len == 0) { 1247 /* didn't compress, or CCP not up yet */ 1248 kfree_skb(new_skb); 1249 new_skb = skb; 1250 } else { 1251 /* 1252 * (len < 0) 1253 * MPPE requires that we do not send unencrypted 1254 * frames. The compressor will return -1 if we 1255 * should drop the frame. We cannot simply test 1256 * the compress_proto because MPPE and MPPC share 1257 * the same number. 1258 */ 1259 if (net_ratelimit()) 1260 printk(KERN_ERR "ppp: compressor dropped pkt\n"); 1261 kfree_skb(skb); 1262 kfree_skb(new_skb); 1263 new_skb = NULL; 1264 } 1265 return new_skb; 1266} 1267 1268/* 1269 * Compress and send a frame. 1270 * The caller should have locked the xmit path, 1271 * and xmit_pending should be 0. 
1272 */ 1273static void 1274ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) 1275{ 1276 int proto = PPP_PROTO(skb); 1277 struct sk_buff *new_skb; 1278 int len; 1279 unsigned char *cp; 1280 1281 if (proto < 0x8000) { 1282#ifdef CONFIG_PPP_FILTER 1283 /* check if we should pass this packet */ 1284 /* the filter instructions are constructed assuming 1285 a four-byte PPP header on each packet */ 1286 *skb_push(skb, 2) = 1; 1287 if (ppp->pass_filter 1288 && sk_run_filter(skb, ppp->pass_filter, 1289 ppp->pass_len) == 0) { 1290 if (ppp->debug & 1) 1291 printk(KERN_DEBUG "PPP: outbound frame not passed\n"); 1292 kfree_skb(skb); 1293 return; 1294 } 1295 /* if this packet passes the active filter, record the time */ 1296 if (!(ppp->active_filter 1297 && sk_run_filter(skb, ppp->active_filter, 1298 ppp->active_len) == 0)) 1299 ppp->last_xmit = jiffies; 1300 skb_pull(skb, 2); 1301#else 1302 /* for data packets, record the time */ 1303 //ppp->last_xmit = jiffies; 1304 /* foxconn wklin modified start, 01/18/2007 */ 1305 if (!skb->sk) /* record the time if not from IP stack */ 1306 ppp->last_xmit = jiffies; 1307 /* foxconn wklin modified end, 01/02/2007 */ 1308#endif /* CONFIG_PPP_FILTER */ 1309 } 1310 1311 ++ppp->stats.tx_packets; 1312 ppp->stats.tx_bytes += skb->len - 2; 1313 1314 switch (proto) { 1315 case PPP_IP: 1316 if (ppp->vj == 0 || (ppp->flags & SC_COMP_TCP) == 0) 1317 break; 1318 /* try to do VJ TCP header compression */ 1319 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2, 1320 GFP_ATOMIC); 1321 if (new_skb == 0) { 1322 printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n"); 1323 goto drop; 1324 } 1325 skb_reserve(new_skb, ppp->dev->hard_header_len - 2); 1326 cp = skb->data + 2; 1327 len = slhc_compress(ppp->vj, cp, skb->len - 2, 1328 new_skb->data + 2, &cp, 1329 !(ppp->flags & SC_NO_TCP_CCID)); 1330 if (cp == skb->data + 2) { 1331 /* didn't compress */ 1332 kfree_skb(new_skb); 1333 } else { 1334 if (cp[0] & SL_TYPE_COMPRESSED_TCP) { 1335 proto = PPP_VJC_COMP; 1336 cp[0] &= ~SL_TYPE_COMPRESSED_TCP; 1337 } else { 1338 proto = PPP_VJC_UNCOMP; 1339 cp[0] = skb->data[2]; 1340 } 1341 kfree_skb(skb); 1342 skb = new_skb; 1343 cp = skb_put(skb, len + 2); 1344 cp[0] = 0; 1345 cp[1] = proto; 1346 } 1347 break; 1348 1349 case PPP_CCP: 1350 /* peek at outbound CCP frames */ 1351 ppp_ccp_peek(ppp, skb, 0); 1352 break; 1353 } 1354 1355 /* try to do packet compression */ 1356 if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state != 0 1357 && proto != PPP_LCP && proto != PPP_CCP) { 1358 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) { 1359 if (net_ratelimit()) 1360 printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n"); 1361 goto drop; 1362 } 1363 skb = pad_compress_skb(ppp, skb); 1364 if (!skb) 1365 goto drop; 1366 } 1367 1368 /* 1369 * If we are waiting for traffic (demand dialling), 1370 * queue it up for pppd to receive. 
1371 */ 1372 if (ppp->flags & SC_LOOP_TRAFFIC) { 1373 if (ppp->file.rq.qlen > PPP_MAX_RQLEN) 1374 goto drop; 1375 /* Foxconn added start, Winster Chan, 01/02/2007 */ 1376 if (skb->sk) { 1377 if (skb->data[0]==0x00 && skb->data[1]==0x21 && 1378 skb->data[11]==0x01 && skb->data[18]==0xFF) 1379 { 1380 printk("PPP: Received triggerring packet.\n"); 1381 /*foxconn added start, water, 04/16/10*/ 1382 /*add michael's patch, @3500L BTS-A201001425: 1383 Internet PPPoE/PPTP idle time is not precise 1384 at the first time.*/ 1385 ppp->last_xmit = jiffies; 1386 /*foxconn added end, water, 04/16/10*/ 1387 } 1388 else 1389 goto drop; 1390 } 1391 printk("PPP: DoD triggered.\n"); 1392 /* Foxconn added end, Winster Chan, 01/02/2007 */ 1393 skb_queue_tail(&ppp->file.rq, skb); 1394 wake_up_interruptible(&ppp->file.rwait); 1395 return; 1396 } 1397 1398 ppp->xmit_pending = skb; 1399 ppp_push(ppp); 1400 return; 1401 1402 drop: 1403 if (skb) 1404 kfree_skb(skb); 1405 ++ppp->stats.tx_errors; 1406} 1407 1408/* 1409 * Try to send the frame in xmit_pending. 1410 * The caller should have the xmit path locked. 1411 */ 1412static void 1413ppp_push(struct ppp *ppp) 1414{ 1415 struct list_head *list; 1416 struct channel *pch; 1417 struct sk_buff *skb = ppp->xmit_pending; 1418 1419 if (skb == 0) 1420 return; 1421 1422 list = &ppp->channels; 1423 if (list_empty(list)) { 1424 /* nowhere to send the packet, just drop it */ 1425 ppp->xmit_pending = NULL; 1426 kfree_skb(skb); 1427 return; 1428 } 1429 1430 if ((ppp->flags & SC_MULTILINK) == 0) { 1431 /* foxconn wklin added start, 12/09/2010 */ 1432#define PPP_SHORTCUT 1433#ifdef PPP_SHORTCUT 1434 extern struct ppp_channel_ops async_ops; 1435#endif 1436 /* foxconn wklin added end, 12/09/2010 */ 1437 /* not doing multilink: send it down the first channel */ 1438 list = list->next; 1439 pch = list_entry(list, struct channel, clist); 1440 1441 /* foxconn wklin added start, 12/09/2010 */ 1442#ifdef PPP_SHORTCUT 1443 /* we bound two channels to ppp interface, make sure the we send to the 1444 * short-cut chanel (!&async_ops). 1445 */ 1446 if (pch->chan->ops == &async_ops && ppp->n_channels == 2) { 1447 list = list->next; 1448 pch = list_entry(list, struct channel, clist); 1449 } 1450#endif 1451 /* foxconn wklin added end, 12/09/2010 */ 1452 spin_lock_bh(&pch->downl); 1453 if (pch->chan) { 1454 if (pch->chan->ops->start_xmit(pch->chan, skb)) 1455 ppp->xmit_pending = NULL; 1456 } else { 1457 /* channel got unregistered */ 1458 kfree_skb(skb); 1459 ppp->xmit_pending = NULL; 1460 } 1461 spin_unlock_bh(&pch->downl); 1462 return; 1463 } 1464 1465#ifdef CONFIG_PPP_MULTILINK 1466 /* Multilink: fragment the packet over as many links 1467 as can take the packet at the moment. */ 1468 if (!ppp_mp_explode(ppp, skb)) 1469 return; 1470#endif /* CONFIG_PPP_MULTILINK */ 1471 1472 ppp->xmit_pending = NULL; 1473 kfree_skb(skb); 1474} 1475 1476#ifdef CONFIG_PPP_MULTILINK 1477/* 1478 * Divide a packet to be transmitted into fragments and 1479 * send them out the individual links. 1480 */ 1481static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) 1482{ 1483 int len, fragsize; 1484 int i, bits, hdrlen, mtu; 1485 int flen; 1486 int navail, nfree; 1487 int nbigger; 1488 unsigned char *p, *q; 1489 struct list_head *list; 1490 struct channel *pch; 1491 struct sk_buff *frag; 1492 struct ppp_channel *chan; 1493 1494 nfree = 0; /* # channels which have no packet already queued */ 1495 navail = 0; /* total # of usable channels (not deregistered) */ 1496 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? 
MPHDRLEN_SSN: MPHDRLEN; 1497 i = 0; 1498 list_for_each_entry(pch, &ppp->channels, clist) { 1499 navail += pch->avail = (pch->chan != NULL); 1500 if (pch->avail) { 1501 if (skb_queue_empty(&pch->file.xq) || 1502 !pch->had_frag) { 1503 pch->avail = 2; 1504 ++nfree; 1505 } 1506 if (!pch->had_frag && i < ppp->nxchan) 1507 ppp->nxchan = i; 1508 } 1509 ++i; 1510 } 1511 1512 /* 1513 * Don't start sending this packet unless at least half of 1514 * the channels are free. This gives much better TCP 1515 * performance if we have a lot of channels. 1516 */ 1517 if (nfree == 0 || nfree < navail / 2) 1518 return 0; /* can't take now, leave it in xmit_pending */ 1519 1520 p = skb->data; 1521 len = skb->len; 1522 if (*p == 0) { 1523 ++p; 1524 --len; 1525 } 1526 1527 /* 1528 * Decide on fragment size. 1529 * We create a fragment for each free channel regardless of 1530 * how small they are (i.e. even 0 length) in order to minimize 1531 * the time that it will take to detect when a channel drops 1532 * a fragment. 1533 */ 1534 fragsize = len; 1535 if (nfree > 1) 1536 fragsize = DIV_ROUND_UP(fragsize, nfree); 1537 /* nbigger channels get fragsize bytes, the rest get fragsize-1, 1538 except if nbigger==0, then they all get fragsize. */ 1539 nbigger = len % nfree; 1540 1541 /* skip to the channel after the one we last used 1542 and start at that one */ 1543 list = &ppp->channels; 1544 for (i = 0; i < ppp->nxchan; ++i) { 1545 list = list->next; 1546 if (list == &ppp->channels) { 1547 i = 0; 1548 break; 1549 } 1550 } 1551 1552 /* create a fragment for each channel */ 1553 bits = B; 1554 while (nfree > 0 || len > 0) { 1555 list = list->next; 1556 if (list == &ppp->channels) { 1557 i = 0; 1558 continue; 1559 } 1560 pch = list_entry(list, struct channel, clist); 1561 ++i; 1562 if (!pch->avail) 1563 continue; 1564 1565 /* 1566 * Skip this channel if it has a fragment pending already and 1567 * we haven't given a fragment to all of the free channels. 1568 */ 1569 if (pch->avail == 1) { 1570 if (nfree > 0) 1571 continue; 1572 } else { 1573 --nfree; 1574 pch->avail = 1; 1575 } 1576 1577 /* check the channel's mtu and whether it is still attached. */ 1578 spin_lock_bh(&pch->downl); 1579 if (pch->chan == NULL) { 1580 /* can't use this channel, it's being deregistered */ 1581 spin_unlock_bh(&pch->downl); 1582 pch->avail = 0; 1583 if (--navail == 0) 1584 break; 1585 continue; 1586 } 1587 1588 /* 1589 * Create a fragment for this channel of 1590 * min(max(mtu+2-hdrlen, 4), fragsize, len) bytes. 1591 * If mtu+2-hdrlen < 4, that is a ridiculously small 1592 * MTU, so we use mtu = 2 + hdrlen. 1593 */ 1594 if (fragsize > len) 1595 fragsize = len; 1596 flen = fragsize; 1597 mtu = pch->chan->mtu + 2 - hdrlen; 1598 if (mtu < 4) 1599 mtu = 4; 1600 if (flen > mtu) 1601 flen = mtu; 1602 if (flen == len && nfree == 0) 1603 bits |= E; 1604 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC); 1605 if (frag == 0) 1606 goto noskb; 1607 q = skb_put(frag, flen + hdrlen); 1608 1609 /* make the MP header */ 1610 q[0] = PPP_MP >> 8; 1611 q[1] = PPP_MP; 1612 if (ppp->flags & SC_MP_XSHORTSEQ) { 1613 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 1614 q[3] = ppp->nxseq; 1615 } else { 1616 q[2] = bits; 1617 q[3] = ppp->nxseq >> 16; 1618 q[4] = ppp->nxseq >> 8; 1619 q[5] = ppp->nxseq; 1620 } 1621 1622 /* 1623 * Copy the data in. 1624 * Unfortunately there is a bug in older versions of 1625 * the Linux PPP multilink reconstruction code where it 1626 * drops 0-length fragments. Therefore we make sure the 1627 * fragment has at least one byte of data. 
Any bytes
		 * we add in this situation will end up as padding on the
		 * end of the reconstructed packet.
		 */
		if (flen == 0)
			*skb_put(frag, 1) = 0;
		else
			memcpy(q + hdrlen, p, flen);

		/* try to send it down the channel */
		chan = pch->chan;
		if (!skb_queue_empty(&pch->file.xq) ||
		    !chan->ops->start_xmit(chan, frag))
			skb_queue_tail(&pch->file.xq, frag);
		pch->had_frag = 1;
		p += flen;
		len -= flen;
		++ppp->nxseq;
		bits = 0;
		spin_unlock_bh(&pch->downl);

		if (--nbigger == 0 && fragsize > 0)
			--fragsize;
	}
	ppp->nxchan = i;

	return 1;

 noskb:
	spin_unlock_bh(&pch->downl);
	if (ppp->debug & 1)
		printk(KERN_ERR "PPP: no memory (fragment)\n");
	++ppp->stats.tx_errors;
	++ppp->nxseq;
	return 1;	/* abandon the frame */
}
#endif /* CONFIG_PPP_MULTILINK */

/*
 * Try to send data out on a channel.
 */
static void
ppp_channel_push(struct channel *pch)
{
	struct sk_buff *skb;
	struct ppp *ppp;

	spin_lock_bh(&pch->downl);
	if (pch->chan != 0) {
		while (!skb_queue_empty(&pch->file.xq)) {
			skb = skb_dequeue(&pch->file.xq);
			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
				/* put the packet back and try again later */
				skb_queue_head(&pch->file.xq, skb);
				break;
			}
		}
	} else {
		/* channel got deregistered */
		skb_queue_purge(&pch->file.xq);
	}
	spin_unlock_bh(&pch->downl);
	/* see if there is anything from the attached unit to be sent */
	if (skb_queue_empty(&pch->file.xq)) {
		read_lock_bh(&pch->upl);
		ppp = pch->ppp;
		if (ppp != 0)
			ppp_xmit_process(ppp);
		read_unlock_bh(&pch->upl);
	}
}
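
/*
 * Illustrative sketch (not part of the driver): the calls a lower-layer
 * channel driver (async tty, PPPoE, PPTP, ...) makes into the generic layer
 * when it receives a frame.  The driver-side function name is hypothetical;
 * ppp_input() and ppp_input_error() are the real entry points declared in
 * include/linux/ppp_channel.h and defined below.
 */
#if 0
static void example_channel_rx(struct ppp_channel *chan,
			       const unsigned char *buf, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (skb == NULL) {
		/* tell the unit a frame was lost on this channel */
		ppp_input_error(chan, 0);
		return;
	}
	memcpy(skb_put(skb, len), buf, len);
	ppp_input(chan, skb);	/* ends up in ppp_receive_frame() */
}
#endif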

/*
 * Receive-side routines.
 */

/* misuse a few fields of the skb for MP reconstruction */
#define sequence	priority
#define BEbits		cb[0]

static inline void
ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	ppp_recv_lock(ppp);
	/* ppp->dev == 0 means interface is closing down */
	if (ppp->dev != 0)
		ppp_receive_frame(ppp, skb, pch);
	else
		kfree_skb(skb);
	ppp_recv_unlock(ppp);
}

void
ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct channel *pch = chan->ppp;
	int proto;

	if (pch == 0 || skb->len == 0) {
		kfree_skb(skb);
		return;
	}

	proto = PPP_PROTO(skb);
	read_lock_bh(&pch->upl);
	if (pch->ppp == 0 || proto >= 0xc000 || proto == PPP_CCPFRAG) {
		/* put it on the channel queue */
		skb_queue_tail(&pch->file.rq, skb);
		/* drop old frames if queue too long */
		while (pch->file.rq.qlen > PPP_MAX_RQLEN
		       && (skb = skb_dequeue(&pch->file.rq)) != 0)
			kfree_skb(skb);
		wake_up_interruptible(&pch->file.rwait);
	} else {
		ppp_do_recv(pch->ppp, skb, pch);
	}
	read_unlock_bh(&pch->upl);
}

/* Put a 0-length skb in the receive queue as an error indication */
void
ppp_input_error(struct ppp_channel *chan, int code)
{
	struct channel *pch = chan->ppp;
	struct sk_buff *skb;

	if (pch == 0)
		return;

	read_lock_bh(&pch->upl);
	if (pch->ppp != 0) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (skb != 0) {
			skb->len = 0;	/* probably unnecessary */
			skb->cb[0] = code;
			ppp_do_recv(pch->ppp, skb, pch);
		}
	}
	read_unlock_bh(&pch->upl);
}

/*
 * We come in here to process a received frame.
 * The receive side of the ppp unit is locked.
 */
static void
ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	if (skb->len >= 2) {
#ifdef CONFIG_PPP_MULTILINK
		if (PPP_PROTO(skb) == PPP_MP)
			ppp_receive_mp_frame(ppp, skb, pch);
		else
#endif /* CONFIG_PPP_MULTILINK */
			ppp_receive_nonmp_frame(ppp, skb);
		return;
	}

	if (skb->len > 0)
		/* note: a 0-length skb is used as an error indication */
		++ppp->stats.rx_length_errors;

	kfree_skb(skb);
	ppp_receive_error(ppp);
}

static void
ppp_receive_error(struct ppp *ppp)
{
	++ppp->stats.rx_errors;
	if (ppp->vj != 0)
		slhc_toss(ppp->vj);
}

static void
ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *ns;
	int proto, len, npi;

	/*
	 * Decompress the frame, if compressed.
	 * Note that some decompressors need to see uncompressed frames
	 * that come in as well as compressed frames.
1811 */ 1812 if (ppp->rc_state != 0 && (ppp->rstate & SC_DECOMP_RUN) 1813 && (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0) 1814 skb = ppp_decompress_frame(ppp, skb); 1815 1816 if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR) 1817 goto err; 1818 1819 proto = PPP_PROTO(skb); 1820 switch (proto) { 1821 case PPP_VJC_COMP: 1822 /* decompress VJ compressed packets */ 1823 if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP)) 1824 goto err; 1825 1826 if (skb_tailroom(skb) < 124) { 1827 /* copy to a new sk_buff with more tailroom */ 1828 ns = dev_alloc_skb(skb->len + 128); 1829 if (ns == 0) { 1830 printk(KERN_ERR"PPP: no memory (VJ decomp)\n"); 1831 goto err; 1832 } 1833 skb_reserve(ns, 2); 1834 skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len); 1835 kfree_skb(skb); 1836 skb = ns; 1837 } 1838 else 1839 skb->ip_summed = CHECKSUM_NONE; 1840 1841 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); 1842 if (len <= 0) { 1843 printk(KERN_DEBUG "PPP: VJ decompression error\n"); 1844 goto err; 1845 } 1846 len += 2; 1847 if (len > skb->len) 1848 skb_put(skb, len - skb->len); 1849 else if (len < skb->len) 1850 skb_trim(skb, len); 1851 proto = PPP_IP; 1852 break; 1853 1854 case PPP_VJC_UNCOMP: 1855 if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP)) 1856 goto err; 1857 1858 /* Until we fix the decompressor need to make sure 1859 * data portion is linear. 1860 */ 1861 if (!pskb_may_pull(skb, skb->len)) 1862 goto err; 1863 1864 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { 1865 printk(KERN_ERR "PPP: VJ uncompressed error\n"); 1866 goto err; 1867 } 1868 proto = PPP_IP; 1869 break; 1870 1871 case PPP_CCP: 1872 ppp_ccp_peek(ppp, skb, 1); 1873 break; 1874 } 1875 1876 ++ppp->stats.rx_packets; 1877 ppp->stats.rx_bytes += skb->len - 2; 1878 1879 npi = proto_to_npindex(proto); 1880 if (npi < 0) { 1881 /* control or unknown frame - pass it to pppd */ 1882 skb_queue_tail(&ppp->file.rq, skb); 1883 /* limit queue length by dropping old frames */ 1884 while (ppp->file.rq.qlen > PPP_MAX_RQLEN 1885 && (skb = skb_dequeue(&ppp->file.rq)) != 0) 1886 kfree_skb(skb); 1887 /* wake up any process polling or blocking on read */ 1888 wake_up_interruptible(&ppp->file.rwait); 1889 1890 } else { 1891 /* network protocol frame - give it to the kernel */ 1892 1893#ifdef CONFIG_PPP_FILTER 1894 /* check if the packet passes the pass and active filters */ 1895 /* the filter instructions are constructed assuming 1896 a four-byte PPP header on each packet */ 1897 *skb_push(skb, 2) = 0; 1898 if (ppp->pass_filter 1899 && sk_run_filter(skb, ppp->pass_filter, 1900 ppp->pass_len) == 0) { 1901 if (ppp->debug & 1) 1902 printk(KERN_DEBUG "PPP: inbound frame not passed\n"); 1903 kfree_skb(skb); 1904 return; 1905 } 1906 if (!(ppp->active_filter 1907 && sk_run_filter(skb, ppp->active_filter, 1908 ppp->active_len) == 0)) 1909 ppp->last_recv = jiffies; 1910 skb_pull(skb, 2); 1911#else 1912 //ppp->last_recv = jiffies; 1913 /* Foxconn removed start, Winster Chan, 01/12/2007 */ 1914 ; /* ppp->last_recv = jiffies; */ 1915 /* Foxconn removed end, Winster Chan, 01/12/2007 */ 1916#endif /* CONFIG_PPP_FILTER */ 1917 1918 if ((ppp->dev->flags & IFF_UP) == 0 1919 || ppp->npmode[npi] != NPMODE_PASS) { 1920 kfree_skb(skb); 1921 } else { 1922 /* chop off protocol */ 1923 skb_pull_rcsum(skb, 2); 1924 skb->dev = ppp->dev; 1925 skb->protocol = htons(npindex_to_ethertype[npi]); 1926 skb_reset_mac_header(skb); 1927 netif_rx(skb); 1928 ppp->dev->last_rx = jiffies; 1929 } 1930 } 1931 return; 1932 1933 err: 1934 kfree_skb(skb); 
1935 ppp_receive_error(ppp); 1936} 1937 1938static struct sk_buff * 1939ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) 1940{ 1941 int proto = PPP_PROTO(skb); 1942 struct sk_buff *ns; 1943 int len; 1944 1945 /* Until we fix all the decompressor's need to make sure 1946 * data portion is linear. 1947 */ 1948 if (!pskb_may_pull(skb, skb->len)) 1949 goto err; 1950 1951 if (proto == PPP_COMP) { 1952 int obuff_size; 1953 1954 switch(ppp->rcomp->compress_proto) { 1955 case CI_MPPE: 1956 obuff_size = ppp->mru + PPP_HDRLEN + 1; 1957 break; 1958 default: 1959 obuff_size = ppp->mru + PPP_HDRLEN; 1960 break; 1961 } 1962 1963 ns = dev_alloc_skb(obuff_size); 1964 if (ns == 0) { 1965 printk(KERN_ERR "ppp_decompress_frame: no memory\n"); 1966 goto err; 1967 } 1968 /* the decompressor still expects the A/C bytes in the hdr */ 1969 len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2, 1970 skb->len + 2, ns->data, ppp->mru + PPP_HDRLEN); 1971 if (len < 0) { 1972 /* Pass the compressed frame to pppd as an 1973 error indication. */ 1974 if (len == DECOMP_FATALERROR) 1975 ppp->rstate |= SC_DC_FERROR; 1976 kfree_skb(ns); 1977 goto err; 1978 } 1979 1980 kfree_skb(skb); 1981 skb = ns; 1982 skb_put(skb, len); 1983 skb_pull(skb, 2); /* pull off the A/C bytes */ 1984 1985 } else { 1986 /* Uncompressed frame - pass to decompressor so it 1987 can update its dictionary if necessary. */ 1988 if (ppp->rcomp->incomp) 1989 ppp->rcomp->incomp(ppp->rc_state, skb->data - 2, 1990 skb->len + 2); 1991 } 1992 1993 return skb; 1994 1995 err: 1996 ppp->rstate |= SC_DC_ERROR; 1997 ppp_receive_error(ppp); 1998 return skb; 1999} 2000 2001#ifdef CONFIG_PPP_MULTILINK 2002/* 2003 * Receive a multilink frame. 2004 * We put it on the reconstruction queue and then pull off 2005 * as many completed frames as we can. 2006 */ 2007static void 2008ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 2009{ 2010 u32 mask, seq; 2011 struct channel *ch; 2012 int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 2013 2014 if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0) 2015 goto err; /* no good, throw it away */ 2016 2017 /* Decode sequence number and begin/end bits */ 2018 if (ppp->flags & SC_MP_SHORTSEQ) { 2019 seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3]; 2020 mask = 0xfff; 2021 } else { 2022 seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5]; 2023 mask = 0xffffff; 2024 } 2025 skb->BEbits = skb->data[2]; 2026 skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */ 2027 2028 /* 2029 * Do protocol ID decompression on the first fragment of each packet. 2030 */ 2031 if ((skb->BEbits & B) && (skb->data[0] & 1)) 2032 *skb_push(skb, 1) = 0; 2033 2034 /* 2035 * Expand sequence number to 32 bits, making it as close 2036 * as possible to ppp->minseq. 2037 */ 2038 seq |= ppp->minseq & ~mask; 2039 if ((int)(ppp->minseq - seq) > (int)(mask >> 1)) 2040 seq += mask + 1; 2041 else if ((int)(seq - ppp->minseq) > (int)(mask >> 1)) 2042 seq -= mask + 1; /* should never happen */ 2043 skb->sequence = seq; 2044 pch->lastseq = seq; 2045 2046 /* 2047 * If this packet comes before the next one we were expecting, 2048 * drop it. 2049 */ 2050 if (seq_before(seq, ppp->nextseq)) { 2051 kfree_skb(skb); 2052 ++ppp->stats.rx_dropped; 2053 ppp_receive_error(ppp); 2054 return; 2055 } 2056 2057 /* 2058 * Reevaluate minseq, the minimum over all channels of the 2059 * last sequence number received on each channel. 
Because of 2060 * the increasing sequence number rule, we know that any fragment 2061 * before `minseq' which hasn't arrived is never going to arrive. 2062 * The list of channels can't change because we have the receive 2063 * side of the ppp unit locked. 2064 */ 2065 list_for_each_entry(ch, &ppp->channels, clist) { 2066 if (seq_before(ch->lastseq, seq)) 2067 seq = ch->lastseq; 2068 } 2069 if (seq_before(ppp->minseq, seq)) 2070 ppp->minseq = seq; 2071 2072 /* Put the fragment on the reconstruction queue */ 2073 ppp_mp_insert(ppp, skb); 2074 2075 /* If the queue is getting long, don't wait any longer for packets 2076 before the start of the queue. */ 2077 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN 2078 && seq_before(ppp->minseq, ppp->mrq.next->sequence)) 2079 ppp->minseq = ppp->mrq.next->sequence; 2080 2081 /* Pull completed packets off the queue and receive them. */ 2082 while ((skb = ppp_mp_reconstruct(ppp)) != 0) 2083 ppp_receive_nonmp_frame(ppp, skb); 2084 2085 return; 2086 2087 err: 2088 kfree_skb(skb); 2089 ppp_receive_error(ppp); 2090} 2091 2092/* 2093 * Insert a fragment on the MP reconstruction queue. 2094 * The queue is ordered by increasing sequence number. 2095 */ 2096static void 2097ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb) 2098{ 2099 struct sk_buff *p; 2100 struct sk_buff_head *list = &ppp->mrq; 2101 u32 seq = skb->sequence; 2102 2103 /* N.B. we don't need to lock the list lock because we have the 2104 ppp unit receive-side lock. */ 2105 for (p = list->next; p != (struct sk_buff *)list; p = p->next) 2106 if (seq_before(seq, p->sequence)) 2107 break; 2108 __skb_insert(skb, p->prev, p, list); 2109} 2110 2111/* 2112 * Reconstruct a packet from the MP fragment queue. 2113 * We go through increasing sequence numbers until we find a 2114 * complete packet, or we get to the sequence number for a fragment 2115 * which hasn't arrived but might still do so. 2116 */ 2117struct sk_buff * 2118ppp_mp_reconstruct(struct ppp *ppp) 2119{ 2120 u32 seq = ppp->nextseq; 2121 u32 minseq = ppp->minseq; 2122 struct sk_buff_head *list = &ppp->mrq; 2123 struct sk_buff *p, *next; 2124 struct sk_buff *head, *tail; 2125 struct sk_buff *skb = NULL; 2126 int lost = 0, len = 0; 2127 2128 if (ppp->mrru == 0) /* do nothing until mrru is set */ 2129 return NULL; 2130 head = list->next; 2131 tail = NULL; 2132 for (p = head; p != (struct sk_buff *) list; p = next) { 2133 next = p->next; 2134 if (seq_before(p->sequence, seq)) { 2135 /* this can't happen, anyway ignore the skb */ 2136 printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n", 2137 p->sequence, seq); 2138 head = next; 2139 continue; 2140 } 2141 if (p->sequence != seq) { 2142 /* Fragment `seq' is missing. If it is after 2143 minseq, it might arrive later, so stop here. */ 2144 if (seq_after(seq, minseq)) 2145 break; 2146 /* Fragment `seq' is lost, keep going. */ 2147 lost = 1; 2148 seq = seq_before(minseq, p->sequence)? 2149 minseq + 1: p->sequence; 2150 next = p; 2151 continue; 2152 } 2153 2154 /* 2155 * At this point we know that all the fragments from 2156 * ppp->nextseq to seq are either present or lost. 2157 * Also, there are no complete packets in the queue 2158 * that have no missing fragments and end before this 2159 * fragment. 2160 */ 2161 2162 /* B bit set indicates this fragment starts a packet */ 2163 if (p->BEbits & B) { 2164 head = p; 2165 lost = 0; 2166 len = 0; 2167 } 2168 2169 len += p->len; 2170 2171 /* Got a complete packet yet? 
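	   (that is, no fragments were lost and this fragment carries the E bit
	   while `head' carries the B bit)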
*/ 2172 if (lost == 0 && (p->BEbits & E) && (head->BEbits & B)) { 2173 if (len > ppp->mrru + 2) { 2174 ++ppp->stats.rx_length_errors; 2175 printk(KERN_DEBUG "PPP: reconstructed packet" 2176 " is too long (%d)\n", len); 2177 } else if (p == head) { 2178 /* fragment is complete packet - reuse skb */ 2179 tail = p; 2180 skb = skb_get(p); 2181 break; 2182 } else if ((skb = dev_alloc_skb(len)) == NULL) { 2183 ++ppp->stats.rx_missed_errors; 2184 printk(KERN_DEBUG "PPP: no memory for " 2185 "reconstructed packet"); 2186 } else { 2187 tail = p; 2188 break; 2189 } 2190 ppp->nextseq = seq + 1; 2191 } 2192 2193 /* 2194 * If this is the ending fragment of a packet, 2195 * and we haven't found a complete valid packet yet, 2196 * we can discard up to and including this fragment. 2197 */ 2198 if (p->BEbits & E) 2199 head = next; 2200 2201 ++seq; 2202 } 2203 2204 /* If we have a complete packet, copy it all into one skb. */ 2205 if (tail != NULL) { 2206 /* If we have discarded any fragments, 2207 signal a receive error. */ 2208 if (head->sequence != ppp->nextseq) { 2209 if (ppp->debug & 1) 2210 printk(KERN_DEBUG " missed pkts %u..%u\n", 2211 ppp->nextseq, head->sequence-1); 2212 ++ppp->stats.rx_dropped; 2213 ppp_receive_error(ppp); 2214 } 2215 2216 if (head != tail) 2217 /* copy to a single skb */ 2218 for (p = head; p != tail->next; p = p->next) 2219 skb_copy_bits(p, 0, skb_put(skb, p->len), p->len); 2220 ppp->nextseq = tail->sequence + 1; 2221 head = tail->next; 2222 } 2223 2224 /* Discard all the skbuffs that we have copied the data out of 2225 or that we can't use. */ 2226 while ((p = list->next) != head) { 2227 __skb_unlink(p, list); 2228 kfree_skb(p); 2229 } 2230 2231 return skb; 2232} 2233#endif /* CONFIG_PPP_MULTILINK */ 2234 2235/* 2236 * Channel interface. 2237 */ 2238 2239/* 2240 * Create a new, unattached ppp channel. 2241 */ 2242int 2243ppp_register_channel(struct ppp_channel *chan) 2244{ 2245 struct channel *pch; 2246 2247 pch = kzalloc(sizeof(struct channel), GFP_KERNEL); 2248 if (pch == 0) 2249 return -ENOMEM; 2250 pch->ppp = NULL; 2251 pch->chan = chan; 2252 chan->ppp = pch; 2253 init_ppp_file(&pch->file, CHANNEL); 2254 pch->file.hdrlen = chan->hdrlen; 2255#ifdef CONFIG_PPP_MULTILINK 2256 pch->lastseq = -1; 2257#endif /* CONFIG_PPP_MULTILINK */ 2258 init_rwsem(&pch->chan_sem); 2259 spin_lock_init(&pch->downl); 2260 rwlock_init(&pch->upl); 2261 spin_lock_bh(&all_channels_lock); 2262 pch->file.index = ++last_channel_index; 2263 list_add(&pch->list, &new_channels); 2264 atomic_inc(&channel_count); 2265 spin_unlock_bh(&all_channels_lock); 2266 return 0; 2267} 2268 2269/* 2270 * Return the index of a channel. 2271 */ 2272int ppp_channel_index(struct ppp_channel *chan) 2273{ 2274 struct channel *pch = chan->ppp; 2275 2276 if (pch != 0) 2277 return pch->file.index; 2278 return -1; 2279} 2280 2281/* 2282 * Return the PPP unit number to which a channel is connected. 2283 */ 2284int ppp_unit_number(struct ppp_channel *chan) 2285{ 2286 struct channel *pch = chan->ppp; 2287 int unit = -1; 2288 2289 if (pch != 0) { 2290 read_lock_bh(&pch->upl); 2291 if (pch->ppp != 0) 2292 unit = pch->ppp->file.index; 2293 read_unlock_bh(&pch->upl); 2294 } 2295 return unit; 2296} 2297 2298/* 2299 * Disconnect a channel from the generic layer. 2300 * This must be called in process context. 
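 * It may sleep while waiting for any calls into the channel's
 * start_xmit or ioctl routines to return.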
2301 */
2302void
2303ppp_unregister_channel(struct ppp_channel *chan)
2304{
2305 struct channel *pch = chan->ppp;
2306
2307 if (pch == 0)
2308 return; /* should never happen */
2309 chan->ppp = NULL;
2310
2311 /*
2312 * This ensures that we have returned from any calls into
2313 * the channel's start_xmit or ioctl routine before we proceed.
2314 */
2315 down_write(&pch->chan_sem);
2316 spin_lock_bh(&pch->downl);
2317 pch->chan = NULL;
2318 spin_unlock_bh(&pch->downl);
2319 up_write(&pch->chan_sem);
2320 ppp_disconnect_channel(pch);
2321 spin_lock_bh(&all_channels_lock);
2322 list_del(&pch->list);
2323 spin_unlock_bh(&all_channels_lock);
2324 pch->file.dead = 1;
2325 wake_up_interruptible(&pch->file.rwait);
2326 if (atomic_dec_and_test(&pch->file.refcnt))
2327 ppp_destroy_channel(pch);
2328}
2329
2330/*
2331 * Callback from a channel when it can accept more to transmit.
2332 * This should be called at BH/softirq level, not interrupt level.
2333 */
2334void
2335ppp_output_wakeup(struct ppp_channel *chan)
2336{
2337 struct channel *pch = chan->ppp;
2338
2339 if (pch == 0)
2340 return;
2341 ppp_channel_push(pch);
2342}
2343
2344/*
2345 * Compression control.
2346 */
2347
2348/* Process the PPPIOCSCOMPRESS ioctl. */
2349static int
2350ppp_set_compress(struct ppp *ppp, unsigned long arg)
2351{
2352 int err;
2353 struct compressor *cp, *ocomp;
2354 struct ppp_option_data data;
2355 void *state, *ostate;
2356 unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
2357
2358 err = -EFAULT;
2359 if (copy_from_user(&data, (void __user *) arg, sizeof(data))
2360 || (data.length <= CCP_MAX_OPTION_LENGTH
2361 && copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
2362 goto out;
2363 err = -EINVAL;
2364 if (data.length > CCP_MAX_OPTION_LENGTH
2365 || ccp_option[1] < 2 || ccp_option[1] > data.length)
2366 goto out;
2367
2368 cp = find_compressor(ccp_option[0]);
2369#ifdef CONFIG_KMOD
2370 if (cp == 0) {
2371 request_module("ppp-compress-%d", ccp_option[0]);
2372 cp = find_compressor(ccp_option[0]);
2373 }
2374#endif /* CONFIG_KMOD */
2375 if (cp == 0)
2376 goto out;
2377
2378 err = -ENOBUFS;
2379 if (data.transmit) {
2380 state = cp->comp_alloc(ccp_option, data.length);
2381 if (state != 0) {
2382 ppp_xmit_lock(ppp);
2383 ppp->xstate &= ~SC_COMP_RUN;
2384 ocomp = ppp->xcomp;
2385 ostate = ppp->xc_state;
2386 ppp->xcomp = cp;
2387 ppp->xc_state = state;
2388 ppp_xmit_unlock(ppp);
2389 if (ostate != 0) {
2390 ocomp->comp_free(ostate);
2391 module_put(ocomp->owner);
2392 }
2393 err = 0;
2394 } else
2395 module_put(cp->owner);
2396
2397 } else {
2398 state = cp->decomp_alloc(ccp_option, data.length);
2399 if (state != 0) {
2400 ppp_recv_lock(ppp);
2401 ppp->rstate &= ~SC_DECOMP_RUN;
2402 ocomp = ppp->rcomp;
2403 ostate = ppp->rc_state;
2404 ppp->rcomp = cp;
2405 ppp->rc_state = state;
2406 ppp_recv_unlock(ppp);
2407 if (ostate != 0) {
2408 ocomp->decomp_free(ostate);
2409 module_put(ocomp->owner);
2410 }
2411 err = 0;
2412 } else
2413 module_put(cp->owner);
2414 }
2415
2416 out:
2417 return err;
2418}
2419
2420/*
2421 * Look at a CCP packet and update our state accordingly.
2422 * We assume the caller has the xmit or recv path locked.
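 * `inbound' is non-zero for a CCP packet we have received and zero for
 * one we are about to transmit.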
2423 */
2424static void
2425ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
2426{
2427 unsigned char *dp;
2428 int len;
2429
2430 if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
2431 return; /* no header */
2432 dp = skb->data + 2;
2433
2434 switch (CCP_CODE(dp)) {
2435 case CCP_CONFREQ:
2436
2437 /* A ConfReq starts negotiation of compression
2438 * in one direction of transmission,
2439 * and hence brings it down...but which way?
2440 *
2441 * Remember:
2442 * A ConfReq indicates what the sender would like to receive
2443 */
2444 if (inbound)
2445 /* He is proposing what I should send */
2446 ppp->xstate &= ~SC_COMP_RUN;
2447 else
2448 /* I am proposing what he should send */
2449 ppp->rstate &= ~SC_DECOMP_RUN;
2450
2451 break;
2452
2453 case CCP_TERMREQ:
2454 case CCP_TERMACK:
2455 /*
2456 * CCP is going down, both directions of transmission
2457 */
2458 ppp->rstate &= ~SC_DECOMP_RUN;
2459 ppp->xstate &= ~SC_COMP_RUN;
2460 break;
2461
2462 case CCP_CONFACK:
2463 if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
2464 break;
2465 len = CCP_LENGTH(dp);
2466 if (!pskb_may_pull(skb, len + 2))
2467 return; /* too short */
2468 dp += CCP_HDRLEN;
2469 len -= CCP_HDRLEN;
2470 if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
2471 break;
2472 if (inbound) {
2473 /* we will start receiving compressed packets */
2474 if (ppp->rc_state == 0)
2475 break;
2476 if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
2477 ppp->file.index, 0, ppp->mru, ppp->debug)) {
2478 ppp->rstate |= SC_DECOMP_RUN;
2479 ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
2480 }
2481 } else {
2482 /* we will soon start sending compressed packets */
2483 if (ppp->xc_state == 0)
2484 break;
2485 if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
2486 ppp->file.index, 0, ppp->debug))
2487 ppp->xstate |= SC_COMP_RUN;
2488 }
2489 break;
2490
2491 case CCP_RESETACK:
2492 /* reset the [de]compressor */
2493 if ((ppp->flags & SC_CCP_UP) == 0)
2494 break;
2495 if (inbound) {
2496 if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
2497 ppp->rcomp->decomp_reset(ppp->rc_state);
2498 ppp->rstate &= ~SC_DC_ERROR;
2499 }
2500 } else {
2501 if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
2502 ppp->xcomp->comp_reset(ppp->xc_state);
2503 }
2504 break;
2505 }
2506}
2507
2508/* Free up compression resources. */
2509static void
2510ppp_ccp_closed(struct ppp *ppp)
2511{
2512 void *xstate, *rstate;
2513 struct compressor *xcomp, *rcomp;
2514
2515 ppp_lock(ppp);
2516 ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
2517 ppp->xstate = 0;
2518 xcomp = ppp->xcomp;
2519 xstate = ppp->xc_state;
2520 ppp->xc_state = NULL;
2521 ppp->rstate = 0;
2522 rcomp = ppp->rcomp;
2523 rstate = ppp->rc_state;
2524 ppp->rc_state = NULL;
2525 ppp_unlock(ppp);
2526
2527 if (xstate) {
2528 xcomp->comp_free(xstate);
2529 module_put(xcomp->owner);
2530 }
2531 if (rstate) {
2532 rcomp->decomp_free(rstate);
2533 module_put(rcomp->owner);
2534 }
2535}
2536
2537/* List of compressors.
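   The list is protected by compressor_list_lock.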
 */
2538static LIST_HEAD(compressor_list);
2539static DEFINE_SPINLOCK(compressor_list_lock);
2540
2541struct compressor_entry {
2542 struct list_head list;
2543 struct compressor *comp;
2544};
2545
2546static struct compressor_entry *
2547find_comp_entry(int proto)
2548{
2549 struct compressor_entry *ce;
2550
2551 list_for_each_entry(ce, &compressor_list, list) {
2552 if (ce->comp->compress_proto == proto)
2553 return ce;
2554 }
2555 return NULL;
2556}
2557
2558/* Register a compressor */
2559int
2560ppp_register_compressor(struct compressor *cp)
2561{
2562 struct compressor_entry *ce;
2563 int ret;
2564 spin_lock(&compressor_list_lock);
2565 ret = -EEXIST;
2566 if (find_comp_entry(cp->compress_proto) != 0)
2567 goto out;
2568 ret = -ENOMEM;
2569 ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
2570 if (ce == 0)
2571 goto out;
2572 ret = 0;
2573 ce->comp = cp;
2574 list_add(&ce->list, &compressor_list);
2575 out:
2576 spin_unlock(&compressor_list_lock);
2577 return ret;
2578}
2579
2580/* Unregister a compressor */
2581void
2582ppp_unregister_compressor(struct compressor *cp)
2583{
2584 struct compressor_entry *ce;
2585
2586 spin_lock(&compressor_list_lock);
2587 ce = find_comp_entry(cp->compress_proto);
2588 if (ce != 0 && ce->comp == cp) {
2589 list_del(&ce->list);
2590 kfree(ce);
2591 }
2592 spin_unlock(&compressor_list_lock);
2593}
2594
2595/* Find a compressor. */
2596static struct compressor *
2597find_compressor(int type)
2598{
2599 struct compressor_entry *ce;
2600 struct compressor *cp = NULL;
2601
2602 spin_lock(&compressor_list_lock);
2603 ce = find_comp_entry(type);
2604 if (ce != 0) {
2605 cp = ce->comp;
2606 if (!try_module_get(cp->owner))
2607 cp = NULL;
2608 }
2609 spin_unlock(&compressor_list_lock);
2610 return cp;
2611}
2612
2613/*
2614 * Miscellaneous stuff.
2615 */
2616
2617static void
2618ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2619{
2620 struct slcompress *vj = ppp->vj;
2621
2622 memset(st, 0, sizeof(*st));
2623 st->p.ppp_ipackets = ppp->stats.rx_packets;
2624 st->p.ppp_ierrors = ppp->stats.rx_errors;
2625 st->p.ppp_ibytes = ppp->stats.rx_bytes;
2626 st->p.ppp_opackets = ppp->stats.tx_packets;
2627 st->p.ppp_oerrors = ppp->stats.tx_errors;
2628 st->p.ppp_obytes = ppp->stats.tx_bytes;
2629 if (vj == 0)
2630 return;
2631 st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
2632 st->vj.vjs_compressed = vj->sls_o_compressed;
2633 st->vj.vjs_searches = vj->sls_o_searches;
2634 st->vj.vjs_misses = vj->sls_o_misses;
2635 st->vj.vjs_errorin = vj->sls_i_error;
2636 st->vj.vjs_tossed = vj->sls_i_tossed;
2637 st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
2638 st->vj.vjs_compressedin = vj->sls_i_compressed;
2639}
2640
2641/*
2642 * Stuff for handling the lists of ppp units and channels
2643 * and for initialization.
2644 */
2645
2646/*
2647 * Create a new ppp interface unit. Fails if it can't allocate memory
2648 * or if there is already a unit with the requested number.
2649 * unit == -1 means allocate a new number.
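 * Returns the new unit and sets *retp to 0 on success; returns NULL
 * and sets *retp to a negative errno on failure.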
2650 */ 2651static struct ppp * 2652ppp_create_interface(int unit, int *retp) 2653{ 2654 struct ppp *ppp; 2655 struct net_device *dev = NULL; 2656 int ret = -ENOMEM; 2657 int i; 2658 2659 ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL); 2660 if (!ppp) 2661 goto out; 2662 dev = alloc_netdev(0, "", ppp_setup); 2663 if (!dev) 2664 goto out1; 2665 2666 ppp->mru = PPP_MRU; 2667 init_ppp_file(&ppp->file, INTERFACE); 2668 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ 2669 for (i = 0; i < NUM_NP; ++i) 2670 ppp->npmode[i] = NPMODE_PASS; 2671 INIT_LIST_HEAD(&ppp->channels); 2672 spin_lock_init(&ppp->rlock); 2673 spin_lock_init(&ppp->wlock); 2674#ifdef CONFIG_PPP_MULTILINK 2675 ppp->minseq = -1; 2676 skb_queue_head_init(&ppp->mrq); 2677#endif /* CONFIG_PPP_MULTILINK */ 2678 ppp->dev = dev; 2679 dev->priv = ppp; 2680 2681 dev->hard_start_xmit = ppp_start_xmit; 2682 dev->get_stats = ppp_net_stats; 2683 dev->do_ioctl = ppp_net_ioctl; 2684 2685 /* foxconn wklin added start, 11/06/2008 */ 2686 { 2687#define nvram_safe_get(name) (nvram_get(name) ? : "") 2688 char *value = nvram_safe_get("wan_proto"); 2689 if(!strcmp(value, "pppoe")) 2690 dev->acos_flags |= NETIF_ACOSFLAGS_PPPOE; 2691 if(!strcmp(value, "pptp")) 2692 dev->acos_flags |= NETIF_ACOSFLAGS_PPTP; 2693 } 2694 2695 ret = -EEXIST; 2696 mutex_lock(&all_ppp_mutex); 2697 if (unit < 0) 2698 unit = cardmap_find_first_free(all_ppp_units); 2699 else if (cardmap_get(all_ppp_units, unit) != NULL) 2700 goto out2; /* unit already exists */ 2701 2702 /* Initialize the new ppp unit */ 2703 ppp->file.index = unit; 2704 sprintf(dev->name, "ppp%d", unit); 2705 2706 ret = register_netdev(dev); 2707 if (ret != 0) { 2708 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", 2709 dev->name, ret); 2710 goto out2; 2711 } 2712 2713 atomic_inc(&ppp_unit_count); 2714 ret = cardmap_set(&all_ppp_units, unit, ppp); 2715 if (ret != 0) 2716 goto out3; 2717 2718#ifdef HNDCTF 2719 if ((ctf_dev_register(kcih, dev, FALSE) != BCME_OK) || 2720 (ctf_enable(kcih, dev, TRUE, NULL) != BCME_OK)) 2721 ctf_dev_unregister(kcih, dev); 2722#endif 2723 2724 mutex_unlock(&all_ppp_mutex); 2725 *retp = 0; 2726 return ppp; 2727 2728out3: 2729 atomic_dec(&ppp_unit_count); 2730out2: 2731 mutex_unlock(&all_ppp_mutex); 2732 free_netdev(dev); 2733out1: 2734 kfree(ppp); 2735out: 2736 *retp = ret; 2737 return NULL; 2738} 2739 2740/* 2741 * Initialize a ppp_file structure. 2742 */ 2743static void 2744init_ppp_file(struct ppp_file *pf, int kind) 2745{ 2746 pf->kind = kind; 2747 skb_queue_head_init(&pf->xq); 2748 skb_queue_head_init(&pf->rq); 2749 atomic_set(&pf->refcnt, 1); 2750 init_waitqueue_head(&pf->rwait); 2751} 2752 2753/* 2754 * Take down a ppp interface unit - called when the owning file 2755 * (the one that created the unit) is closed or detached. 2756 */ 2757static void ppp_shutdown_interface(struct ppp *ppp) 2758{ 2759 struct net_device *dev; 2760 2761 mutex_lock(&all_ppp_mutex); 2762 ppp_lock(ppp); 2763 dev = ppp->dev; 2764 ppp->dev = NULL; 2765 ppp_unlock(ppp); 2766 /* This will call dev_close() for us. */ 2767 if (dev) { 2768#ifdef HNDCTF 2769 ctf_dev_unregister(kcih, dev); 2770#endif 2771 unregister_netdev(dev); 2772 free_netdev(dev); 2773 } 2774 cardmap_set(&all_ppp_units, ppp->file.index, NULL); 2775 ppp->file.dead = 1; 2776 ppp->owner = NULL; 2777 wake_up_interruptible(&ppp->file.rwait); 2778 mutex_unlock(&all_ppp_mutex); 2779} 2780 2781/* 2782 * Free the memory used by a ppp unit. 
This is only called once 2783 * there are no channels connected to the unit and no file structs 2784 * that reference the unit. 2785 */ 2786static void ppp_destroy_interface(struct ppp *ppp) 2787{ 2788 atomic_dec(&ppp_unit_count); 2789 2790 if (!ppp->file.dead || ppp->n_channels) { 2791 /* "can't happen" */ 2792 printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d " 2793 "n_channels=%d !\n", ppp, ppp->file.dead, 2794 ppp->n_channels); 2795 return; 2796 } 2797 2798 ppp_ccp_closed(ppp); 2799 if (ppp->vj) { 2800 slhc_free(ppp->vj); 2801 ppp->vj = NULL; 2802 } 2803 skb_queue_purge(&ppp->file.xq); 2804 skb_queue_purge(&ppp->file.rq); 2805#ifdef CONFIG_PPP_MULTILINK 2806 skb_queue_purge(&ppp->mrq); 2807#endif /* CONFIG_PPP_MULTILINK */ 2808#ifdef CONFIG_PPP_FILTER 2809 kfree(ppp->pass_filter); 2810 ppp->pass_filter = NULL; 2811 kfree(ppp->active_filter); 2812 ppp->active_filter = NULL; 2813#endif /* CONFIG_PPP_FILTER */ 2814 2815 if (ppp->xmit_pending) 2816 kfree_skb(ppp->xmit_pending); 2817 2818 kfree(ppp); 2819} 2820 2821/* 2822 * Locate an existing ppp unit. 2823 * The caller should have locked the all_ppp_mutex. 2824 */ 2825static struct ppp * 2826ppp_find_unit(int unit) 2827{ 2828 return cardmap_get(all_ppp_units, unit); 2829} 2830 2831/* 2832 * Locate an existing ppp channel. 2833 * The caller should have locked the all_channels_lock. 2834 * First we look in the new_channels list, then in the 2835 * all_channels list. If found in the new_channels list, 2836 * we move it to the all_channels list. This is for speed 2837 * when we have a lot of channels in use. 2838 */ 2839static struct channel * 2840ppp_find_channel(int unit) 2841{ 2842 struct channel *pch; 2843 2844 list_for_each_entry(pch, &new_channels, list) { 2845 if (pch->file.index == unit) { 2846 list_move(&pch->list, &all_channels); 2847 return pch; 2848 } 2849 } 2850 list_for_each_entry(pch, &all_channels, list) { 2851 if (pch->file.index == unit) 2852 return pch; 2853 } 2854 return NULL; 2855} 2856 2857/* 2858 * Connect a PPP channel to a PPP interface unit. 2859 */ 2860static int 2861ppp_connect_channel(struct channel *pch, int unit) 2862{ 2863 struct ppp *ppp; 2864 int ret = -ENXIO; 2865 int hdrlen; 2866 2867 mutex_lock(&all_ppp_mutex); 2868 ppp = ppp_find_unit(unit); 2869 if (ppp == 0) 2870 goto out; 2871 write_lock_bh(&pch->upl); 2872 ret = -EINVAL; 2873 if (pch->ppp != 0) 2874 goto outl; 2875 2876 ppp_lock(ppp); 2877 if (pch->file.hdrlen > ppp->file.hdrlen) 2878 ppp->file.hdrlen = pch->file.hdrlen; 2879 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ 2880 if (ppp->dev && hdrlen > ppp->dev->hard_header_len) 2881 ppp->dev->hard_header_len = hdrlen; 2882 list_add_tail(&pch->clist, &ppp->channels); 2883 ++ppp->n_channels; 2884 pch->ppp = ppp; 2885 atomic_inc(&ppp->file.refcnt); 2886 ppp_unlock(ppp); 2887 ret = 0; 2888 2889 outl: 2890 write_unlock_bh(&pch->upl); 2891 out: 2892 mutex_unlock(&all_ppp_mutex); 2893 return ret; 2894} 2895 2896/* 2897 * Disconnect a channel from its ppp unit. 
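 * Returns 0 if the channel was attached to a unit, or -EINVAL if it
 * was not attached.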
2898 */ 2899static int 2900ppp_disconnect_channel(struct channel *pch) 2901{ 2902 struct ppp *ppp; 2903 int err = -EINVAL; 2904 2905 write_lock_bh(&pch->upl); 2906 ppp = pch->ppp; 2907 pch->ppp = NULL; 2908 write_unlock_bh(&pch->upl); 2909 if (ppp != 0) { 2910 /* remove it from the ppp unit's list */ 2911 ppp_lock(ppp); 2912 list_del(&pch->clist); 2913 if (--ppp->n_channels == 0) 2914 wake_up_interruptible(&ppp->file.rwait); 2915 ppp_unlock(ppp); 2916 if (atomic_dec_and_test(&ppp->file.refcnt)) 2917 ppp_destroy_interface(ppp); 2918 err = 0; 2919 } 2920 return err; 2921} 2922 2923/* 2924 * Free up the resources used by a ppp channel. 2925 */ 2926static void ppp_destroy_channel(struct channel *pch) 2927{ 2928 atomic_dec(&channel_count); 2929 2930 if (!pch->file.dead) { 2931 /* "can't happen" */ 2932 printk(KERN_ERR "ppp: destroying undead channel %p !\n", 2933 pch); 2934 return; 2935 } 2936 skb_queue_purge(&pch->file.xq); 2937 skb_queue_purge(&pch->file.rq); 2938 kfree(pch); 2939} 2940 2941static void __exit ppp_cleanup(void) 2942{ 2943 /* should never happen */ 2944 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) 2945 printk(KERN_ERR "PPP: removing module but units remain!\n"); 2946 cardmap_destroy(&all_ppp_units); 2947 if (unregister_chrdev(PPP_MAJOR, "ppp") != 0) 2948 printk(KERN_ERR "PPP: failed to unregister PPP device\n"); 2949 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); 2950 class_destroy(ppp_class); 2951} 2952 2953/* 2954 * Cardmap implementation. 2955 */ 2956static void *cardmap_get(struct cardmap *map, unsigned int nr) 2957{ 2958 struct cardmap *p; 2959 int i; 2960 2961 for (p = map; p != NULL; ) { 2962 if ((i = nr >> p->shift) >= CARDMAP_WIDTH) 2963 return NULL; 2964 if (p->shift == 0) 2965 return p->ptr[i]; 2966 nr &= ~(CARDMAP_MASK << p->shift); 2967 p = p->ptr[i]; 2968 } 2969 return NULL; 2970} 2971 2972static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) 2973{ 2974 struct cardmap *p; 2975 int i; 2976 2977 p = *pmap; 2978 if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) { 2979 do { 2980 /* need a new top level */ 2981 struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL); 2982 if (!np) 2983 goto enomem; 2984 np->ptr[0] = p; 2985 if (p != NULL) { 2986 np->shift = p->shift + CARDMAP_ORDER; 2987 p->parent = np; 2988 } else 2989 np->shift = 0; 2990 p = np; 2991 } while ((nr >> p->shift) >= CARDMAP_WIDTH); 2992 *pmap = p; 2993 } 2994 while (p->shift > 0) { 2995 i = (nr >> p->shift) & CARDMAP_MASK; 2996 if (p->ptr[i] == NULL) { 2997 struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL); 2998 if (!np) 2999 goto enomem; 3000 np->shift = p->shift - CARDMAP_ORDER; 3001 np->parent = p; 3002 p->ptr[i] = np; 3003 } 3004 if (ptr == NULL) 3005 clear_bit(i, &p->inuse); 3006 p = p->ptr[i]; 3007 } 3008 i = nr & CARDMAP_MASK; 3009 p->ptr[i] = ptr; 3010 if (ptr != NULL) 3011 set_bit(i, &p->inuse); 3012 else 3013 clear_bit(i, &p->inuse); 3014 return 0; 3015 enomem: 3016 return -ENOMEM; 3017} 3018 3019static unsigned int cardmap_find_first_free(struct cardmap *map) 3020{ 3021 struct cardmap *p; 3022 unsigned int nr = 0; 3023 int i; 3024 3025 if ((p = map) == NULL) 3026 return 0; 3027 for (;;) { 3028 i = find_first_zero_bit(&p->inuse, CARDMAP_WIDTH); 3029 if (i >= CARDMAP_WIDTH) { 3030 if (p->parent == NULL) 3031 return CARDMAP_WIDTH << p->shift; 3032 p = p->parent; 3033 i = (nr >> p->shift) & CARDMAP_MASK; 3034 set_bit(i, &p->inuse); 3035 continue; 3036 } 3037 nr = (nr & (~CARDMAP_MASK << p->shift)) | (i << p->shift); 3038 if (p->shift == 0 || p->ptr[i] 
== NULL) 3039 return nr; 3040 p = p->ptr[i]; 3041 } 3042} 3043 3044static void cardmap_destroy(struct cardmap **pmap) 3045{ 3046 struct cardmap *p, *np; 3047 int i; 3048 3049 for (p = *pmap; p != NULL; p = np) { 3050 if (p->shift != 0) { 3051 for (i = 0; i < CARDMAP_WIDTH; ++i) 3052 if (p->ptr[i] != NULL) 3053 break; 3054 if (i < CARDMAP_WIDTH) { 3055 np = p->ptr[i]; 3056 p->ptr[i] = NULL; 3057 continue; 3058 } 3059 } 3060 np = p->parent; 3061 kfree(p); 3062 } 3063 *pmap = NULL; 3064} 3065 3066#ifdef CTF_PPPOE 3067void 3068ppp_rxstats_upd(void *pppif, struct sk_buff *skb) 3069{ 3070 struct ppp *ppp = ((struct net_device *)pppif)->priv; 3071 ++ppp->stats.rx_packets; 3072 ppp->stats.rx_bytes += skb->len; 3073 ppp->last_recv = jiffies; 3074} 3075 3076void 3077ppp_txstats_upd(void *pppif, struct sk_buff *skb) 3078{ 3079 struct ppp *ppp = ((struct net_device *)pppif)->priv; 3080 ++ppp->stats.tx_packets; 3081 ppp->stats.tx_bytes += skb->len; 3082 ppp->last_xmit = jiffies; 3083} 3084 3085EXPORT_SYMBOL(ppp_rxstats_upd); 3086EXPORT_SYMBOL(ppp_txstats_upd); 3087#endif /* CTF_PPPOE */ 3088 3089/* Module/initialization stuff */ 3090 3091module_init(ppp_init); 3092module_exit(ppp_cleanup); 3093 3094EXPORT_SYMBOL(ppp_register_channel); 3095EXPORT_SYMBOL(ppp_unregister_channel); 3096EXPORT_SYMBOL(ppp_channel_index); 3097EXPORT_SYMBOL(ppp_unit_number); 3098EXPORT_SYMBOL(ppp_input); 3099EXPORT_SYMBOL(ppp_input_error); 3100EXPORT_SYMBOL(ppp_output_wakeup); 3101EXPORT_SYMBOL(ppp_register_compressor); 3102EXPORT_SYMBOL(ppp_unregister_compressor); 3103MODULE_LICENSE("GPL"); 3104MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR); 3105MODULE_ALIAS("/dev/ppp"); 3106
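
/*
 * Illustrative sketch (kept out of the build with #if 0): roughly how a
 * lower-level driver would be expected to use the channel interface
 * exported above, assuming the struct ppp_channel / ppp_channel_ops
 * declarations in include/linux/ppp_channel.h and the usual signatures
 * of ppp_input() and ppp_input_error().  `struct my_dev', my_dev_send()
 * and my_dev_attach() are hypothetical names, not part of this driver.
 */
#if 0
static int my_chan_start_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct my_dev *dev = chan->private;

	if (!my_dev_send(dev, skb))
		return 0;	/* busy: the generic layer holds the frame
				   until we call ppp_output_wakeup() */
	return 1;		/* frame accepted and consumed */
}

static struct ppp_channel_ops my_chan_ops = {
	.start_xmit = my_chan_start_xmit,
};

static int my_dev_attach(struct my_dev *dev)
{
	dev->chan.private = dev;
	dev->chan.ops = &my_chan_ops;
	dev->chan.mtu = 1500;		/* largest frame the link can carry */
	dev->chan.hdrlen = 0;		/* extra header space the link needs */
	return ppp_register_channel(&dev->chan);	/* process context */
}

/*
 * On receive the driver hands complete PPP frames to the generic layer
 * with ppp_input(&dev->chan, skb) and reports framing errors with
 * ppp_input_error(&dev->chan, 0); when transmit space frees up again it
 * calls ppp_output_wakeup(&dev->chan) at BH level, and it detaches with
 * ppp_unregister_channel(&dev->chan) in process context.
 */
#endif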