/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#ifdef __KERNEL__
#include <linux/config.h>
#ifdef CONFIG_NET_PROFILE
#include <net/profile.h>
#endif

struct divert_blk;
struct vlan_group;

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */

/* Transmit path return codes (dev_queue_xmit / qdisc layer). */
#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* Map a NET_XMIT_* code to an errno; only congestion notification
 * is not treated as an error. */
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

#endif

#define MAX_ADDR_LEN	8		/* Largest hardware address length */

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
#define LL_MAX_HEADER	32
#else
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#define LL_MAX_HEADER	96
#else
#define LL_MAX_HEADER	48
#endif
#endif

#if !defined(CONFIG_NET_IPIP) && \
    !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)	/* room for a tunnel header */
#endif

/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;		/* total packets received	*/
	unsigned long	tx_packets;		/* total packets transmitted	*/
	unsigned long	rx_bytes;		/* total bytes received		*/
	unsigned long	tx_bytes;		/* total bytes transmitted	*/
	unsigned long	rx_errors;		/* bad packets received		*/
	unsigned long	tx_errors;		/* packet transmit problems	*/
	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	unsigned long	tx_dropped;		/* no space available in linux	*/
	unsigned long	multicast;		/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error	*/
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

extern const char *if_port_text[];

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

/* Per-cpu receive-path statistics (see netdev_rx_stat below). */
struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned throttled;
	unsigned fastroute_hit;
	unsigned fastroute_success;
	unsigned fastroute_defer;
	unsigned fastroute_deferred_out;
	unsigned fastroute_latency_reduction;
	unsigned cpu_collision;
} ____cacheline_aligned;

extern struct netif_rx_stats netdev_rx_stat[];


/*
 *	We tag multicasts with these structures.
 */

struct dev_mc_list
{
	struct dev_mc_list	*next;
	__u8			dmi_addr[MAX_ADDR_LEN];
	unsigned char		dmi_addrlen;
	int			dmi_users;
	int			dmi_gusers;
};

/* Cached link-layer header, shared by neighbour entries. */
struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry			     */
	atomic_t	hh_refcnt;	/* number of users                   */
	unsigned short  hh_type;	/* protocol identifier, f.e ETH_P_IP
					 * NOTE:  For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	int		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	rwlock_t	hh_lock;
	/* cached hardware header; allow for machine alignment needs.        */
	unsigned long	hh_data[16/sizeof(unsigned long)];
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_XOFF=0,		/* transmit queue stopped */
	__LINK_STATE_START,		/* device is up */
	__LINK_STATE_PRESENT,		/* device present (hot-plug) */
	__LINK_STATE_SCHED,		/* queued for transmit servicing */
	__LINK_STATE_NOCARRIER,		/* no carrier detected */
	__LINK_STATE_RX_SCHED		/* on the rx poll list */
};


/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8



struct net_device
{

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	unsigned long		rmem_end;	/* shmem "recv" end	*/
	unsigned long		rmem_start;	/* shmem "recv" start	*/
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;		/* __LINK_STATE_* bits	*/

	struct net_device	*next;

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	struct net_device	*next_sched;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;


	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct iw_statistics*	(*get_wireless_stats)(struct net_device *dev);

	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	struct iw_handler_def *	wireless_handlers;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */

	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/
	unsigned long		last_rx;	/* Time of last Rx	*/

	unsigned short		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		unused_alignment_fixer; /* Because we need priv_flags,
							 * and we want to be 32-bit aligned.
							 */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/
	void			*priv;	/* pointer to private data	*/

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address	*/
	unsigned char		addr_len;	/* hardware address length	*/

	struct dev_mc_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	int			promiscuity;
	int			allmulti;

	int			watchdog_timeo;
	struct timer_list	watchdog_timer;

	/* Protocol specific pointers */

	void 			*atalk_ptr;	/* AppleTalk link 	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void                    *dn_ptr;        /* DECnet specific data */
	void                    *ip6_ptr;       /* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/

	struct list_head	poll_list;	/* Link to poll list	*/
	int			quota;
	int			weight;

	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
	struct Qdisc		*qdisc_list;
	struct Qdisc		*qdisc_ingress;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

	/* hard_start_xmit synchronizer */
	spinlock_t		xmit_lock;
	/* cpu id of processor entered to hard_start_xmit or -1,
	   if nobody entered there.
	 */
	int			xmit_lock_owner;
	/* device queue lock */
	spinlock_t		queue_lock;
	/* Number of references to this device */
	atomic_t		refcnt;
	/* The flag marking that device is unregistered, but held by an user.
	 * NOTE: historical misspelling of "deadbeef"; the name is part of
	 * the interface and must not be changed here. */
	int			deadbeaf;

	/* Net device features */
	int			features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum only TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_DYNALLOC	16	/* Self-destructing device. */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines.	*/
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
#define HAVE_NETDEV_POLL
	int			(*poll) (struct net_device *dev, int *quota);
	int			(*hard_header) (struct sk_buff *skb,
						struct net_device *dev,
						unsigned short type,
						void *daddr,
						void *saddr,
						unsigned len);
	int			(*rebuild_header)(struct sk_buff *skb);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_HEADER_CACHE
	int			(*hard_header_cache)(struct neighbour *neigh,
						     struct hh_cache *hh);
	void			(*header_cache_update)(struct hh_cache *hh,
						       struct net_device *dev,
						       unsigned char *  haddr);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*hard_header_parse)(struct sk_buff *skb,
						     unsigned char *haddr);
	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
	int			(*accept_fastpath)(struct net_device *, struct dst_entry*);

	/* open/release and usage marking */
	struct module *owner;

	/* bridge stuff */
	struct net_bridge_port	*br_port;

#ifdef CONFIG_NET_FASTROUTE
#define NETDEV_FASTROUTE_HMASK 0xF
	/* Semi-private data. Keep it at the end of device struct. */
	rwlock_t		fastpath_lock;
	struct dst_entry	*fastpath[NETDEV_FASTROUTE_HMASK+1];
#endif
#ifdef CONFIG_NET_DIVERT
	/* this will get initialized at each interface type init routine */
	struct divert_blk	*divert;
#endif /* CONFIG_NET_DIVERT */
};


struct packet_type 
{
	unsigned short		type;	/* This is really htons(ether_type).	*/
	struct net_device	*dev;	/* NULL is wildcarded here		*/
	int			(*func) (struct sk_buff *, struct net_device *,
					 struct packet_type *);
	void			*data;	/* Private to the packet type		*/
	struct packet_type	*next;
};


#include <linux/interrupt.h>
#include <linux/notifier.h>

extern struct net_device		loopback_dev;		/* The loopback */
extern struct net_device		*dev_base;		/* All devices */
extern rwlock_t				dev_base_lock;		/* Device list lock */

extern int			netdev_boot_setup_add(char *name, struct ifmap *map);
extern int 			netdev_boot_setup_check(struct net_device *dev);
extern struct net_device	*dev_getbyhwaddr(unsigned short type, char *hwaddr);
extern void			dev_add_pack(struct packet_type *pt);
extern void			dev_remove_pack(struct packet_type *pt);
extern int			dev_get(const char *name);
extern struct net_device	*dev_get_by_name(const char *name);
extern struct net_device	*__dev_get_by_name(const char *name);
extern struct net_device	*dev_alloc(const char *name, int *err);
extern int			dev_alloc_name(struct net_device *dev, const char *name);
extern int			dev_open(struct net_device *dev);
extern int			dev_close(struct net_device *dev);
extern int			dev_queue_xmit(struct sk_buff *skb);
extern int			register_netdevice(struct net_device *dev);
extern int			unregister_netdevice(struct net_device *dev);
extern int 			register_netdevice_notifier(struct notifier_block *nb);
extern int			unregister_netdevice_notifier(struct notifier_block *nb);
extern int			dev_new_index(void);
extern struct net_device	*dev_get_by_index(int ifindex);
extern struct net_device	*__dev_get_by_index(int ifindex);
extern int			dev_restart(struct net_device *dev);

typedef int gifconf_func_t(struct net_device * dev, char * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
/* Unregister by installing a NULL handler for the family. */
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, 0);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */

struct softnet_data
{
	int			throttle;
	int			cng_level;
	int			avg_blog;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct net_device	*output_queue;
	struct sk_buff		*completion_queue;

	struct net_device	blog_dev;	/* Sorry. 8) */
} ____cacheline_aligned;


extern struct softnet_data softnet_data[NR_CPUS];

#define HAVE_NETIF_QUEUE

/* Put the device on this CPU's output queue and raise the TX softirq.
 * The __LINK_STATE_SCHED bit guards against double-queueing. */
static inline void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		unsigned long flags;
		int cpu = smp_processor_id();

		local_irq_save(flags);
		dev->next_sched = softnet_data[cpu].output_queue;
		softnet_data[cpu].output_queue = dev;
		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}

/* Schedule transmit servicing unless the queue is stopped (XOFF). */
static inline void netif_schedule(struct net_device *dev)
{
	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/* Allow transmissions: clear the XOFF flag. */
static inline void netif_start_queue(struct net_device *dev)
{
	clear_bit(__LINK_STATE_XOFF, &dev->state);
}

/* Restart a stopped queue and kick the TX softirq if it was stopped. */
static inline void netif_wake_queue(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/* Stop transmissions: set the XOFF flag. */
static inline void netif_stop_queue(struct net_device *dev)
{
	set_bit(__LINK_STATE_XOFF, &dev->state);
}

/* Nonzero if the transmit queue is currently stopped. */
static inline int netif_queue_stopped(struct net_device *dev)
{
	return test_bit(__LINK_STATE_XOFF, &dev->state);
}

/* Nonzero if the device has been started (START state bit set). */
static inline int netif_running(struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}


/* Use this variant when it is known for sure that it
 * is executing from interrupt context.
 * The skb is queued on the per-cpu completion queue and actually
 * freed later by the TX softirq.
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		int cpu = smp_processor_id();
		unsigned long flags;

		local_irq_save(flags);
		skb->next = softnet_data[cpu].completion_queue;
		softnet_data[cpu].completion_queue = skb;
		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}

/* Use this variant in places where it could be invoked
 * either from interrupt or non-interrupt context.
 */
static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern int		dev_ioctl(unsigned int cmd, void *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);

extern void		dev_init(void);

extern int		netdev_nit;

/* Post buffer to the network code from _non interrupt_ context.
 * see net/core/dev.c for netif_rx description.
 * Runs pending softirqs immediately so the packet is processed now.
 */
static inline int netif_rx_ni(struct sk_buff *skb)
{
	int err = netif_rx(skb);
	if (softirq_pending(smp_processor_id()))
		do_softirq();
	return err;
}

/* Historical no-op kept for driver source compatibility. */
static inline void dev_init_buffers(struct net_device *dev)
{
	/* WILL BE REMOVED IN 2.5.0 */
}

extern int netdev_finish_unregister(struct net_device *dev);

/* Drop a reference; the final put completes any pending unregister. */
static inline void dev_put(struct net_device *dev)
{
	if (atomic_dec_and_test(&dev->refcnt))
		netdev_finish_unregister(dev);
}

#define __dev_put(dev) atomic_dec(&(dev)->refcnt)
#define dev_hold(dev) atomic_inc(&(dev)->refcnt)

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is caller
 * who is responsible for serialization of these calls.
 */

static inline int netif_carrier_ok(struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

/* Mark carrier present and restart the watchdog on a running device. */
static inline void netif_carrier_on(struct net_device *dev)
{
	clear_bit(__LINK_STATE_NOCARRIER, &dev->state);
	if (netif_running(dev))
		__netdev_watchdog_up(dev);
}

static inline void netif_carrier_off(struct net_device *dev)
{
	set_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

/* Hot-plugging. */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

/* Mark the device absent; stop its queue if it was running. */
static inline void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}

/* Mark the device present again; wake its queue and restart the watchdog. */
static inline void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

/* Schedule rx intr now? */

static inline int netif_rx_schedule_prep(struct net_device *dev)
{
	return netif_running(dev) &&
		!test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */

static inline void __netif_rx_schedule(struct net_device *dev)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	local_irq_save(flags);
	dev_hold(dev);
	list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
	if (dev->quota < 0)
		dev->quota += dev->weight;
	else
		dev->quota = dev->weight;
	__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}

/* Try to reschedule poll. Called by irq handler. */

static inline void netif_rx_schedule(struct net_device *dev)
{
	if (netif_rx_schedule_prep(dev))
		__netif_rx_schedule(dev);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
 * Do not inline this?
 */
static inline int netif_rx_reschedule(struct net_device *dev, int undo)
{
	if (netif_rx_schedule_prep(dev)) {
		unsigned long flags;
		int cpu = smp_processor_id();

		dev->quota += undo;

		local_irq_save(flags);
		list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
		__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
		local_irq_restore(flags);
		return 1;
	}
	return 0;
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!test_bit(__LINK_STATE_RX_SCHED, &dev->state)) BUG();
	list_del(&dev->poll_list);
	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
	local_irq_restore(flags);
}

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);
extern void		fddi_setup(struct net_device *dev);
extern void		tr_setup(struct net_device *dev);
extern void		fc_setup(struct net_device *dev);
extern void		fc_freedev(struct net_device *dev);
/* Support for loadable net-drivers */
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
/* Functions used for multicast support */
extern void		dev_mc_upload(struct net_device *dev);
extern int 		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern void		dev_mc_discard(struct net_device *dev);
extern void		dev_set_promiscuity(struct net_device *dev, int inc);
extern void		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_register_fc(struct net_device *dev, void (*stimul)(struct net_device *dev));
extern void		netdev_unregister_fc(int bit);
extern int		netdev_max_backlog;
extern int		weight_p;
extern unsigned long	netdev_fc_xoff;
extern atomic_t		netdev_dropping;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern struct sk_buff *	skb_checksum_help(struct sk_buff *skb);
#ifdef CONFIG_NET_FASTROUTE
extern int		netdev_fastroute;
extern int		netdev_fastroute_obstacles;
extern void		dev_clear_fastroute(struct net_device *dev);
#endif


#endif /* __KERNEL__ */

#endif	/* _LINUX_NETDEVICE_H */