netfront.c revision 192871
/*
 *
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/xen/netfront/netfront.c 192871 2009-05-27 02:49:08Z adrian $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/sx.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>	/* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenfunc.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include <dev/xen/netfront/mbufq.h>

#include "xenbus_if.h"

#define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP | CSUM_TSO)

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)

#if __FreeBSD_version >= 700000
/*
 * Should the driver do LRO on the RX end?  This can be toggled on the fly,
 * but the interface must be reset (down/up) for it to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
#else

#define IFCAP_TSO4 0
#define CSUM_TSO 0

#endif
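/*
 * Example (illustrative, not part of the original source): the tunable
 * above can be set at boot time from loader.conf:
 *
 *	hw.xn.enable_lro="0"
 *
 * The per-device "enable_lro" sysctl registered in netfront_attach()
 * below points at the same variable, so it can also be toggled at
 * runtime before a down/up cycle of the interface.
 */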
#ifdef CONFIG_XEN
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif

#define MAX_SKB_FRAGS	(65536 / PAGE_SIZE + 2)
#define RX_COPY_THRESHOLD 256

#define net_ratelimit() 0

struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

static void show_device(struct netfront_info *sc);
#ifdef notyet
static void netfront_closing(device_t dev);
#endif
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void end_access(int ref, void *page);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
	struct netfront_rx_info *rinfo, RING_IDX rp, struct mbuf **list,
	int *pages_flipped_p);

#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)

/*
 * Mbuf pointers. We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
	struct mbuf	*xn_tx_chain[NET_TX_RING_SIZE+1];
	int		 xn_tx_chain_cnt;
	struct mbuf	*xn_rx_chain[NET_RX_RING_SIZE+1];
};


struct net_device_stats
{
	u_long	rx_packets;	/* total packets received	*/
	u_long	tx_packets;	/* total packets transmitted	*/
	u_long	rx_bytes;	/* total bytes received		*/
	u_long	tx_bytes;	/* total bytes transmitted	*/
	u_long	rx_errors;	/* bad packets received		*/
	u_long	tx_errors;	/* packet transmit problems	*/
	u_long	rx_dropped;	/* no space in buffers		*/
	u_long	tx_dropped;	/* no space available		*/
	u_long	multicast;	/* multicast packets received	*/
	u_long	collisions;

	/* detailed rx_errors: */
	u_long	rx_length_errors;
	u_long	rx_over_errors;		/* receiver ring buff overflow	*/
	u_long	rx_crc_errors;		/* recved pkt with crc error	*/
	u_long	rx_frame_errors;	/* recv'd frame alignment error	*/
	u_long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	u_long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	u_long	tx_aborted_errors;
	u_long	tx_carrier_errors;
	u_long	tx_fifo_errors;
	u_long	tx_heartbeat_errors;
	u_long	tx_window_errors;

	/* for cslip etc */
	u_long	rx_compressed;
	u_long	tx_compressed;
};
struct netfront_info {

	struct ifnet *xn_ifp;
#if __FreeBSD_version >= 700000
	struct lro_ctrl xn_lro;
#endif

	struct net_device_stats stats;
	u_int tx_full;

	netif_tx_front_ring_t tx;
	netif_rx_front_ring_t rx;

	struct mtx tx_lock;
	struct mtx rx_lock;
	struct sx  sc_lock;

	u_int handle;
	u_int irq;
	u_int copying_receiver;
	u_int carrier;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
	int rx_min_target, rx_max_target, rx_target;

	/*
	 * {tx,rx}_mbufs store outstanding mbufs.  The first entry in each
	 * array is an index into a chain of free entries.
	 */

	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];

#define TX_MAX_TARGET min(NET_RX_RING_SIZE, 256)
	device_t xbdev;
	int tx_ring_ref;
	int rx_ring_ref;
	uint8_t mac[ETHER_ADDR_LEN];
	struct xn_chain_data xn_cdata;		/* mbufs */
	struct mbuf_head xn_rx_batch;		/* head of the batch queue */

	int xn_if_flags;
	struct callout xn_stat_ch;

	u_long rx_pfn_array[NET_RX_RING_SIZE];
	multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
	mmu_update_t rx_mmu[NET_RX_RING_SIZE];
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain

#define XN_LOCK_INIT(_sc, _name) \
	mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
	mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
	sx_init(&(_sc)->sc_lock, #_name"_sc")

#define XN_RX_LOCK(_sc)		mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)	mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)		mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)	mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)		sx_xlock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)		sx_xunlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_lock, SX_LOCKED);
#define XN_RX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->rx_lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->tx_lock, MA_OWNED);
#define XN_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->rx_lock); \
				mtx_destroy(&(_sc)->tx_lock); \
				sx_destroy(&(_sc)->sc_lock);

struct netfront_rx_info {
	struct netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)	((netif)->carrier = 1)
#define netfront_carrier_off(netif)	((netif)->carrier = 0)
#define netfront_carrier_ok(netif)	((netif)->carrier)
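/*
 * Locking notes (summarized from usage below; a sketch, not part of the
 * original source): rx_lock protects the RX ring and RX mbuf chain,
 * tx_lock protects the TX ring and the TX mbuf freelist, and sc_lock
 * serializes slow-path operations (ioctl, init/stop, reconnect).  Where
 * both ring locks are held together, as in netif_disconnect_backend(),
 * rx_lock is acquired before tx_lock.
 */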
/*
 * Access functions for acquiring and freeing slots in tx_mbufs[].  The
 * freelist is chained through the array itself: a free entry stores the
 * index of the next free entry, cast to a pointer, and entry 0 is the
 * head of the list.
 */

static inline void
add_id_to_freelist(struct mbuf **list, unsigned short id)
{
	KASSERT(id != 0,
	    ("add_id_to_freelist: the head item (0) must always be free."));
	list[id] = list[0];
	list[0] = (void *)(u_long)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
	u_int id = (u_int)(u_long)list[0];

	KASSERT(id != 0,
	    ("get_id_from_freelist: the head item (0) must always remain free."));
	list[0] = list[id];
	return (id);
}
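/*
 * Worked example (illustrative, not from the original source): after
 * create_netdev() below seeds tx_mbufs[i] = i + 1, the head tx_mbufs[0]
 * encodes index 1.  get_id_from_freelist() returns 1 and advances the
 * head to the old tx_mbufs[1], i.e. 2; add_id_to_freelist(list, 1)
 * pushes 1 back by pointing tx_mbufs[1] at the current head and storing
 * 1 in tx_mbufs[0].  Slot 0 never holds an mbuf; it is reserved for the
 * head pointer itself.
 */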
static inline int
xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np,
		   RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct mbuf *m;

	m = np->rx_mbufs[i];
	np->rx_mbufs[i] = NULL;
	return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];

	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef DEBUG

#endif
#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#if 0
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/**
 * Read the 'mac' node at the given device's node in the store, and parse
 * that as colon-separated octets, placing the result in the given mac
 * array.  mac must be a preallocated array of length ETHER_ADDR_LEN.
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	int error, i;
	char *s, *e, *macstr;

	error = xenbus_read(XBT_NIL, xenbus_get_node(dev), "mac", NULL,
	    (void **) &macstr);
	if (error)
		return (error);

	s = macstr;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		if (s == e || (e[0] != ':' && e[0] != 0)) {
			free(macstr, M_DEVBUF);
			return (ENOENT);
		}
		s = &e[1];
	}
	free(macstr, M_DEVBUF);
	return (0);
}
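/*
 * Example xenstore contents (illustrative): the "mac" node holds a
 * string such as "00:16:3e:5a:91:c3", where 00:16:3e is the OUI
 * conventionally assigned to Xen guests.  The parse above requires
 * exactly ETHER_ADDR_LEN colon-separated hex octets and fails with
 * ENOENT otherwise.
 */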
/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

	if (!strcmp(xenbus_get_type(dev), "vif")) {
		device_set_desc(dev, "Virtual Network Interface");
		return (0);
	}

	return (ENXIO);
}

static int
netfront_attach(device_t dev)
{
	int err;

	err = create_netdev(dev);
	if (err) {
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

#if __FreeBSD_version >= 700000
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
	    &xn_enable_lro, 0, "Large Receive Offload");
#endif

	return 0;
}


/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	netif_disconnect_backend(info);
	return (0);
}


/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	const char *node = xenbus_get_node(dev);
	int err;

	err = xen_net_read_mac(dev, info->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_device(dev, info);
	if (err)
		goto out;

 again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}
	err = xenbus_printf(xbt, node, "tx-ring-ref", "%u",
	    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "rx-ring-ref", "%u",
	    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node,
	    "event-channel", "%u", irq_to_evtchn_port(info->irq));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "request-rx-copy", "%u",
	    info->copying_receiver);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}
#if __FreeBSD_version >= 700000
	err = xenbus_printf(xbt, node, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}
#endif

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	netif_free(info);
 out:
	return err;
}
static int
setup_device(device_t dev, struct netfront_info *info)
{
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	int error;
	struct ifnet *ifp;

	ifp = info->xn_ifp;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	info->irq = 0;

	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!txs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
	if (error)
		goto fail;

	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!rxs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
	if (error)
		goto fail;

	error = bind_listening_port_to_irqhandler(xenbus_get_otherend_id(dev),
	    "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, &info->irq);

	if (error) {
		xenbus_dev_fatal(dev, error,
		    "bind_evtchn_to_irqhandler failed");
		goto fail;
	}

	show_device(info);

	return (0);

 fail:
	netif_free(info);
	return (error);
}

/**
 * If this interface has an ipv4 address, send an arp for it.  This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	ifp = info->xn_ifp;
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			arp_ifinit(ifp, ifa);
		}
	}
}

/**
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
	struct netfront_info *sc = device_get_softc(dev);

	DPRINTK("newstate=%d\n", newstate);

	switch (newstate) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
		break;
	case XenbusStateInitWait:
		if (xenbus_get_state(dev) != XenbusStateInitialising)
			break;
		if (network_connect(sc) != 0)
			break;
		xenbus_set_state(dev, XenbusStateConnected);
		netfront_send_fake_arp(dev, sc);
		break;
	case XenbusStateClosing:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	}
}
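/*
 * Handshake summary (drawn from the switch above): the frontend reacts
 * to only two backend states.  When the backend reaches InitWait while
 * the frontend is still Initialising, network_connect() runs and, on
 * success, the frontend advertises Connected and sends an ARP for its
 * address.  When the backend enters Closing, the frontend answers with
 * Closed.  All other backend states are ignored here.
 */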
static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		if (sc->xn_cdata.xn_rx_chain[i] != NULL) {
			m_freem(sc->xn_cdata.xn_rx_chain[i]);
			sc->xn_cdata.xn_rx_chain[i] = NULL;
		}
	}

	sc->rx.rsp_cons = 0;
	sc->xn_rx_if->req_prod = 0;
	sc->xn_rx_if->event = sc->rx.rsp_cons;
#endif
}

static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		if (sc->xn_cdata.xn_tx_chain[i] != NULL) {
			m_freem(sc->xn_cdata.xn_tx_chain[i]);
			sc->xn_cdata.xn_tx_chain[i] = NULL;
		}
	}

	return;
#endif
}

/*
 * Do some brief math on the number of descriptors available to
 * determine how many slots are available.
 *
 * Firstly - wouldn't something with RING_FREE_REQUESTS() be more applicable?
 * Secondly - MAX_SKB_FRAGS is a Linux construct which may not apply here.
 * Thirdly - it isn't used here anyway; the magic constant '24' is possibly
 *   wrong?
 * The "2" is presumably to ensure there are also enough slots available for
 * the ring entries used for "options" (eg, the TSO entry before a packet
 * is queued); I'm not sure why it's 2 and not 1.  Perhaps it's to make sure
 * there's a "free" node in the tx mbuf list (node 0) to represent the
 * freelist?
 *
 * This only figures out whether any xenbus ring descriptors are available;
 * it doesn't at all reflect how many tx mbuf ring descriptors are also
 * available.
 */
static inline int
netfront_tx_slot_available(struct netfront_info *np)
{
	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
	    (TX_MAX_TARGET - /* MAX_SKB_FRAGS */ 24 - 2));
}

static void
netif_release_tx_bufs(struct netfront_info *np)
{
	struct mbuf *m;
	int i;

	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
		m = np->xn_cdata.xn_tx_chain[i];

		if (((u_long)m) < KERNBASE)
			continue;
		gnttab_grant_foreign_access_ref(np->grant_tx_ref[i],
		    xenbus_get_otherend_id(np->xbdev),
		    virt_to_mfn(mtod(m, vm_offset_t)),
		    GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
		    np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(np->tx_mbufs, i);
		np->xn_cdata.xn_tx_chain_cnt--;
		if (np->xn_cdata.xn_tx_chain_cnt < 0) {
			panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0");
		}
		m_freem(m);
	}
}
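/*
 * Concrete numbers (illustrative): with a 4 KB page both shared rings
 * hold 256 entries, so TX_MAX_TARGET is min(256, 256) = 256 and the
 * test above reports a free slot while fewer than 256 - 24 - 2 = 230
 * requests are outstanding, keeping 26 entries in reserve for
 * fragments and option slots.
 */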
static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
	unsigned short id;
	struct mbuf *m_new;
	int i, batch_target, notify;
	RING_IDX req_prod;
	struct xen_memory_reservation reservation;
	grant_ref_t ref;
	int nr_flips;
	netif_rx_request_t *req;
	vm_offset_t vaddr;
	u_long pfn;

	req_prod = sc->rx.req_prod_pvt;

	if (unlikely(sc->carrier == 0))
		return;

	/*
	 * Allocate mbufs greedily, even though we batch updates to the
	 * receive ring.  This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation
	 * requests both for ourself and for other kernel subsystems.
	 */
	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			goto no_mbuf;

		m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE);
		if ((m_new->m_flags & M_EXT) == 0) {
			m_freem(m_new);

no_mbuf:
			if (i != 0)
				goto refill;
			/*
			 * XXX set timer
			 */
			break;
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

		/* queue the mbufs allocated */
		mbufq_tail(&sc->xn_rx_batch, m_new);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (sc->rx_target/2)) {
		if (req_prod > sc->rx.sring->req_prod)
			goto push;
		return;
	}
	/* Adjust floating fill target if we risked running out of buffers. */
	if ( ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) &&
	     ((sc->rx_target *= 2) > sc->rx_max_target) )
		sc->rx_target = sc->rx_max_target;

refill:
	for (nr_flips = i = 0; ; i++) {
		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
			break;

		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
		    vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

		id = xennet_rxidx(req_prod + i);

		KASSERT(sc->xn_cdata.xn_rx_chain[id] == NULL,
		    ("non-NULL xn_rx_chain"));
		sc->xn_cdata.xn_rx_chain[id] = m_new;

		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
		KASSERT((short)ref >= 0, ("negative ref"));
		sc->grant_rx_ref[id] = ref;

		vaddr = mtod(m_new, vm_offset_t);
		pfn = vtophys(vaddr) >> PAGE_SHIFT;
		req = RING_GET_REQUEST(&sc->rx, req_prod + i);

		if (sc->copying_receiver == 0) {
			gnttab_grant_foreign_transfer_ref(ref,
			    otherend_id, pfn);
			sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/*
				 * Remove this page before passing
				 * back to Xen.
				 */
				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
				MULTI_update_va_mapping(&sc->rx_mcl[i],
				    vaddr, 0, 0);
			}
			nr_flips++;
		} else {
			gnttab_grant_foreign_access_ref(ref,
			    otherend_id,
			    PFNTOMFN(pfn), 0);
		}
		req->id = id;
		req->gref = ref;

		sc->rx_pfn_array[i] =
		    vtomach(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
	}

	KASSERT(i, ("no mbufs processed"));	/* should have returned earlier */
	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
	/*
	 * We may have allocated buffers which have entries outstanding
	 * in the page update queue -- make sure we flush those first!
	 */
	PT_UPDATES_FLUSH();
	if (nr_flips != 0) {
#ifdef notyet
		/* Tell the balloon driver what is going on. */
		balloon_update_driver_allowance(i);
#endif
		set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
		reservation.nr_extents   = i;
		reservation.extent_order = 0;
		reservation.address_bits = 0;
		reservation.domid        = DOMID_SELF;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {

			/* After all PTEs have been zapped, flush the TLB. */
			sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
			    UVMF_TLB_FLUSH|UVMF_ALL;

			/* Give away a batch of pages. */
			sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
			sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
			sc->rx_mcl[i].args[1] = (u_long)&reservation;
			/* Zap PTEs and give away pages in one big multicall. */
			(void)HYPERVISOR_multicall(sc->rx_mcl, i+1);

			/* Check return status of HYPERVISOR_dom_mem_op(). */
			if (unlikely(sc->rx_mcl[i].result != i))
				panic("Unable to reduce memory reservation\n");
		} else {
			if (HYPERVISOR_memory_op(
			    XENMEM_decrease_reservation, &reservation)
			    != i)
				panic("Unable to reduce memory "
				    "reservation\n");
		}
	} else {
		wmb();
	}

	/* Above is a suitable barrier to ensure backend will see requests. */
	sc->rx.req_prod_pvt = req_prod + i;
push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
	if (notify)
		notify_remote_via_irq(sc->irq);
}
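/*
 * Refill policy in numbers (illustrative): rx_target starts at
 * RX_MIN_TARGET (32).  Whenever a refill finds fewer than rx_target / 4
 * requests still unanswered by the backend, the target doubles, capped
 * at rx_max_target, so on a 4 KB page a busy interface ratchets up
 * 32 -> 64 -> 128 -> 256 while an idle one keeps its batches small.
 */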
static void
xn_rxeof(struct netfront_info *np)
{
	struct ifnet *ifp;
#if __FreeBSD_version >= 700000
	struct lro_ctrl *lro = &np->xn_lro;
	struct lro_entry *queued;
#endif
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	multicall_entry_t *mcl;
	struct mbuf *m;
	struct mbuf_head rxq, errq;
	int err, pages_flipped = 0, work_to_do;

	do {
		XN_RX_LOCK_ASSERT(np);
		if (!netfront_carrier_ok(np))
			return;

		mbufq_init(&errq);
		mbufq_init(&rxq);

		ifp = np->xn_ifp;

		rp = np->rx.sring->rsp_prod;
		rmb();	/* Ensure we see queued responses up to 'rp'. */

		i = np->rx.rsp_cons;
		while ((i != rp)) {
			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
			memset(extras, 0, sizeof(rinfo.extras));

			m = NULL;
			err = xennet_get_responses(np, &rinfo, rp, &m,
			    &pages_flipped);

			if (unlikely(err)) {
				if (m)
					mbufq_tail(&errq, m);
				np->stats.rx_errors++;
				i = np->rx.rsp_cons;
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			if (rx->flags & NETRXF_data_validated) {
				/* Tell the stack the checksums are okay */
				/*
				 * XXX this isn't necessarily the case -
				 * need to add check
				 */
				m->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID |
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
				m->m_pkthdr.csum_data = 0xffff;
			}

			np->stats.rx_packets++;
			np->stats.rx_bytes += m->m_pkthdr.len;

			mbufq_tail(&rxq, m);
			np->rx.rsp_cons = ++i;
		}

		if (pages_flipped) {
			/* Some pages are no longer absent... */
#ifdef notyet
			balloon_update_driver_allowance(-pages_flipped);
#endif
			/*
			 * Do all the remapping work, and M->P updates, in
			 * one big hypercall.
			 */
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				mcl = np->rx_mcl + pages_flipped;
				mcl->op = __HYPERVISOR_mmu_update;
				mcl->args[0] = (u_long)np->rx_mmu;
				mcl->args[1] = pages_flipped;
				mcl->args[2] = 0;
				mcl->args[3] = DOMID_SELF;
				(void)HYPERVISOR_multicall(np->rx_mcl,
				    pages_flipped + 1);
			}
		}

		while ((m = mbufq_dequeue(&errq)))
			m_freem(m);

		/*
		 * Process all the mbufs after the remapping is complete.
		 * Break the mbuf chain first though.
		 */
		while ((m = mbufq_dequeue(&rxq)) != NULL) {
			ifp->if_ipackets++;

			/*
			 * Do we really need to drop the rx lock?
			 */
			XN_RX_UNLOCK(np);
#if __FreeBSD_version >= 700000
			/* Use LRO if possible */
			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
				/*
				 * If LRO fails, pass up to the stack
				 * directly.
				 */
				(*ifp->if_input)(ifp, m);
			}
#else
			(*ifp->if_input)(ifp, m);
#endif
			XN_RX_LOCK(np);
		}

		np->rx.rsp_cons = i;

#if __FreeBSD_version >= 700000
		/*
		 * Flush any outstanding LRO work
		 */
		while (!SLIST_EMPTY(&lro->lro_active)) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
#endif

#if 0
		/* If we get a callback with very few responses, reduce fill target. */
		/* NB. Note exponential increase, linear decrease. */
		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
		    ((3*np->rx_target) / 4)) &&
		    (--np->rx_target < np->rx_min_target))
			np->rx_target = np->rx_min_target;
#endif

		network_alloc_rx_buffers(np);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
	} while (work_to_do);
}
static void
xn_txeof(struct netfront_info *np)
{
	RING_IDX i, prod;
	unsigned short id;
	struct ifnet *ifp;
	netif_tx_response_t *txr;
	struct mbuf *m;

	XN_TX_LOCK_ASSERT(np);

	if (!netfront_carrier_ok(np))
		return;

	ifp = np->xn_ifp;
	ifp->if_timer = 0;

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (i = np->tx.rsp_cons; i != prod; i++) {
			txr = RING_GET_RESPONSE(&np->tx, i);
			if (txr->status == NETIF_RSP_NULL)
				continue;

			id = txr->id;
			m = np->xn_cdata.xn_tx_chain[id];
			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
			M_ASSERTVALID(m);

			/*
			 * Increment packet count if this is the last
			 * mbuf of the chain.
			 */
			if (!m->m_next)
				ifp->if_opackets++;
			if (unlikely(gnttab_query_foreign_access(
			    np->grant_tx_ref[id]) != 0)) {
				printf("network_tx_buf_gc: warning "
				    "-- grant still in use by backend "
				    "domain.\n");
				goto out;
			}
			gnttab_end_foreign_access_ref(
			    np->grant_tx_ref[id]);
			gnttab_release_grant_reference(
			    &np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;

			np->xn_cdata.xn_tx_chain[id] = NULL;
			add_id_to_freelist(np->xn_cdata.xn_tx_chain, id);
			np->xn_cdata.xn_tx_chain_cnt--;
			if (np->xn_cdata.xn_tx_chain_cnt < 0) {
				panic("xn_txeof: tx_chain_cnt must be >= 0");
			}
			m_free(m);
		}
		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of
		 * tx_cons.  Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending.  Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
		np->tx.sring->rsp_event =
		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;

		mb();

	} while (prod != np->tx.sring->rsp_prod);

 out:
	if (np->tx_full &&
	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		np->tx_full = 0;
#if 0
		if (np->user_state == UST_OPEN)
			netif_wake_queue(dev);
#endif
	}
}
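/*
 * Event moderation example (illustrative): with prod = 100 and
 * req_prod = 140, rsp_event becomes 100 + (40 >> 1) + 1 = 121, so the
 * backend raises the next TX-completion event only after roughly half
 * of the 40 in-flight requests have completed, rather than per packet.
 */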
static void
xn_intr(void *xsc)
{
	struct netfront_info *np = xsc;
	struct ifnet *ifp = np->xn_ifp;

#if 0
	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
	    likely(netfront_carrier_ok(np)) &&
	    ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;
#endif
	if (np->tx.rsp_cons != np->tx.sring->rsp_prod) {
		XN_TX_LOCK(np);
		xn_txeof(np);
		XN_TX_UNLOCK(np);
	}

	XN_RX_LOCK(np);
	xn_rxeof(np);
	XN_RX_UNLOCK(np);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		xn_start(ifp);
}


static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
    grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
	np->rx_mbufs[new] = m;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp)
{
	struct netif_extra_info *extra;
	RING_IDX cons = np->rx.rsp_cons;

	int err = 0;

	do {
		struct mbuf *m;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Missing extra info\n");
#endif
			err = -EINVAL;
			break;
		}

		extra = (struct netif_extra_info *)
		    RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
		    extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Invalid extra type: %d\n",
				    extra->type);
#endif
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
		}

		m = xennet_get_rx_mbuf(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, m, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}
static int
xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp,
    struct mbuf **list,
    int *pages_flipped_p)
{
	int pages_flipped = *pages_flipped_p;
	struct mmu_update *mmu;
	struct multicall_entry *mcl;
	struct netif_rx_response *rx = &rinfo->rx;
	struct netif_extra_info *extras = rinfo->extras;
	RING_IDX cons = np->rx.rsp_cons;
	struct mbuf *m, *m0, *m_prev;
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = 5 /* MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */;
	int frags = 1;
	int err = 0;
	u_long ret;

	m0 = m = m_prev = xennet_get_rx_mbuf(np, cons);


	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}


	if (m0 != NULL) {
		m0->m_pkthdr.len = 0;
		m0->m_next = NULL;
	}

	for (;;) {
		u_long mfn;

#if 0
		printf("rx->status=%hd rx->offset=%hu frags=%u\n",
		    rx->status, rx->offset, frags);
#endif
		if (unlikely(rx->status < 0 ||
		    rx->offset + rx->status > PAGE_SIZE)) {
#if 0
			if (net_ratelimit())
				WPRINTK("rx->offset: %x, size: %u\n",
				    rx->offset, rx->status);
#endif
			xennet_move_rx_slot(np, m, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver.  In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
#if 0
			if (net_ratelimit())
				WPRINTK("Bad rx response id %d.\n", rx->id);
#endif
			err = -EINVAL;
			goto next;
		}

		if (!np->copying_receiver) {
			/*
			 * Memory pressure, insufficient buffer
			 * headroom, ...
			 */
			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
				if (net_ratelimit())
					WPRINTK("Unfulfilled rx req "
					    "(id=%d, st=%d).\n",
					    rx->id, rx->status);
				xennet_move_rx_slot(np, m, ref);
				err = -ENOMEM;
				goto next;
			}

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Remap the page. */
				void *vaddr = mtod(m, void *);
				uint32_t pfn;

				mcl = np->rx_mcl + pages_flipped;
				mmu = np->rx_mmu + pages_flipped;

				MULTI_update_va_mapping(mcl, (u_long)vaddr,
				    (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW |
				    PG_V | PG_M | PG_A, 0);
				pfn = (uintptr_t)m->m_ext.ext_arg1;
				mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
				    MMU_MACHPHYS_UPDATE;
				mmu->val = pfn;

				set_phys_to_machine(pfn, mfn);
			}
			pages_flipped++;
		} else {
			ret = gnttab_end_foreign_access_ref(ref);
			KASSERT(ret, ("ret != 0"));
		}

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

next:
		if (m == NULL)
			break;

		m->m_len = rx->status;
		m->m_data += rx->offset;
		m0->m_pkthdr.len += rx->status;

		if (!(rx->flags & NETRXF_more_data))
			break;

		if (cons + frags == rp) {
			if (net_ratelimit())
				WPRINTK("Need more frags\n");
			err = -ENOENT;
			break;
		}
		m_prev = m;

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		m = xennet_get_rx_mbuf(np, cons + frags);

		m_prev->m_next = m;
		m->m_next = NULL;
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}
	*list = m0;

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			WPRINTK("Too many frags\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + frags;

	*pages_flipped_p = pages_flipped;

	return err;
}
static void
xn_tick_locked(struct netfront_info *sc)
{
	XN_RX_LOCK_ASSERT(sc);
	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

	/* XXX placeholder for printing debug information */
}


static void
xn_tick(void *xsc)
{
	struct netfront_info *sc;

	sc = xsc;
	XN_RX_LOCK(sc);
	xn_tick_locked(sc);
	XN_RX_UNLOCK(sc);
}
static void
xn_start_locked(struct ifnet *ifp)
{
	int otherend_id;
	unsigned short id;
	struct mbuf *m_head, *m;
	struct netfront_info *sc;
	netif_tx_request_t *tx;
	netif_extra_info_t *extra;
	RING_IDX i;
	grant_ref_t ref;
	u_long mfn, tx_bytes;
	int notify, nfrags;

	sc = ifp->if_softc;
	otherend_id = xenbus_get_otherend_id(sc->xbdev);
	tx_bytes = 0;

	if (!netfront_carrier_ok(sc))
		return;

	for (i = sc->tx.req_prod_pvt; TRUE; i++) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * netfront_tx_slot_available() tries to do some math to
		 * ensure that there'll be enough xenbus ring slots available
		 * for the maximum number of packet fragments (and a couple
		 * more for what I guess are TSO and other ring entry items.)
		 */
		if (!netfront_tx_slot_available(sc)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/*
		 * Defragment the mbuf if necessary.
		 */
		for (m = m_head, nfrags = 0; m; m = m->m_next)
			nfrags++;
		if (nfrags > MAX_SKB_FRAGS) {
			m = m_defrag(m_head, M_DONTWAIT);
			if (!m) {
				m_freem(m_head);
				break;
			}
			m_head = m;
		}

		/* Determine how many fragments now exist */
		for (m = m_head, nfrags = 0; m; m = m->m_next)
			nfrags++;

		/*
		 * Don't attempt to queue this packet if there aren't
		 * enough free entries in the chain.  There isn't a 1:1
		 * correspondence between the mbuf TX ring and the xenbus
		 * TX ring; xn_txeof() may need to be called to free up
		 * some slots.
		 *
		 * It is quite possible that this can be later eliminated if
		 * it turns out that partial packets can be pushed into the
		 * ring buffer, with fragments pushed in as further slots
		 * free up.
		 *
		 * It is also quite possible that the driver will lock up:
		 * Xen may not send another interrupt to kick the tx/rx
		 * processing if the xenbus RX ring is full and the xenbus
		 * TX ring is empty.  No further TX work can be done until
		 * space is made in the TX mbuf ring, and the RX side may be
		 * waiting for TX data to continue.  Some timer event should
		 * perhaps be created to kick TX/RX processing along in such
		 * conditions.
		 */

		/*
		 * The test is not ">= NET_TX_RING_SIZE + 1" like the
		 * allocation because slot [0] must stay free as the
		 * freelist head.
		 */
		if (sc->xn_cdata.xn_tx_chain_cnt + nfrags >= NET_TX_RING_SIZE) {
			printf("xn_start_locked: xn_tx_chain_cnt (%d) + nfrags %d >= NET_TX_RING_SIZE (%d); must be full!\n",
			    (int) sc->xn_cdata.xn_tx_chain_cnt,
			    (int) nfrags, (int) NET_TX_RING_SIZE);
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}


		/*
		 * Start packing the mbufs in this chain into
		 * the fragment pointers.  Stop when we run out
		 * of fragments or hit the end of the mbuf chain.
		 */
		m = m_head;
		extra = NULL;
		for (m = m_head; m; m = m->m_next) {
			tx = RING_GET_REQUEST(&sc->tx, i);
			id = get_id_from_freelist(sc->xn_cdata.xn_tx_chain);
			if (id == 0)
				panic("xn_start_locked: was allocated the freelist head!\n");
			sc->xn_cdata.xn_tx_chain_cnt++;
			if (sc->xn_cdata.xn_tx_chain_cnt >= NET_TX_RING_SIZE+1)
				panic("xn_start_locked: tx_chain_cnt must be < NET_TX_RING_SIZE+1\n");
			sc->xn_cdata.xn_tx_chain[id] = m;
			tx->id = id;
			ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
			KASSERT((short)ref >= 0, ("Negative ref"));
			mfn = virt_to_mfn(mtod(m, vm_offset_t));
			gnttab_grant_foreign_access_ref(ref, otherend_id,
			    mfn, GNTMAP_readonly);
			tx->gref = sc->grant_tx_ref[id] = ref;
			tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
			tx->flags = 0;
			if (m == m_head) {
				/*
				 * The first fragment has the entire packet
				 * size, subsequent fragments have just the
				 * fragment size.  The backend works out the
				 * true size of the first fragment by
				 * subtracting the sizes of the other
				 * fragments.
				 */
				tx->size = m->m_pkthdr.len;

				/*
				 * The first fragment contains the
				 * checksum flags and is optionally
				 * followed by extra data for TSO etc.
				 */
				if (m->m_pkthdr.csum_flags
				    & CSUM_DELAY_DATA) {
					tx->flags |= (NETTXF_csum_blank
					    | NETTXF_data_validated);
				}
#if __FreeBSD_version >= 700000
				if (m->m_pkthdr.csum_flags & CSUM_TSO) {
					struct netif_extra_info *gso =
					    (struct netif_extra_info *)
					    RING_GET_REQUEST(&sc->tx, ++i);

					if (extra)
						extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
					else
						tx->flags |= NETTXF_extra_info;

					gso->u.gso.size = m->m_pkthdr.tso_segsz;
					gso->u.gso.type =
					    XEN_NETIF_GSO_TYPE_TCPV4;
					gso->u.gso.pad = 0;
					gso->u.gso.features = 0;

					gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
					gso->flags = 0;
					extra = gso;
				}
#endif
			} else {
				tx->size = m->m_len;
			}
			if (m->m_next) {
				tx->flags |= NETTXF_more_data;
				i++;
			}
		}

		BPF_MTAP(ifp, m_head);

		sc->stats.tx_bytes += m_head->m_pkthdr.len;
		sc->stats.tx_packets++;
	}

	sc->tx.req_prod_pvt = i;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
	if (notify)
		notify_remote_via_irq(sc->irq);

	xn_txeof(sc);

	if (RING_FULL(&sc->tx)) {
		sc->tx_full = 1;
#if 0
		netif_stop_queue(dev);
#endif
	}

	return;
}

static void
xn_start(struct ifnet *ifp)
{
	struct netfront_info *sc;

	sc = ifp->if_softc;
	XN_TX_LOCK(sc);
	xn_start_locked(ifp);
	XN_TX_UNLOCK(sc);
}
/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	xn_stop(sc);

	network_alloc_rx_buffers(sc);
	sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
}


static void
xn_ifinit(void *xsc)
{
	struct netfront_info *sc = xsc;

	XN_LOCK(sc);
	xn_ifinit_locked(sc);
	XN_UNLOCK(sc);
}


static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct netfront_info *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;

	int mask, error = 0;
	switch(cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		XN_LOCK(sc);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				xn_ifinit_locked(sc);
			arp_ifinit(ifp, ifa);
			XN_UNLOCK(sc);
		} else {
			XN_UNLOCK(sc);
			error = ether_ioctl(ifp, cmd, data);
		}
		break;
	case SIOCSIFMTU:
		/* XXX can we alter the MTU on a VN ? */
#ifdef notyet
		if (ifr->ifr_mtu > XN_JUMBO_MTU)
			error = EINVAL;
		else
#endif
		{
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			xn_ifinit(sc);
		}
		break;
	case SIOCSIFFLAGS:
		XN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC.  Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
#ifdef notyet
			/* No promiscuous mode with Xen */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xn_if_flags & IFF_PROMISC)) {
				XN_SETBIT(sc, XN_RX_MODE,
				    XN_RXMODE_RX_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->xn_if_flags & IFF_PROMISC) {
				XN_CLRBIT(sc, XN_RX_MODE,
				    XN_RXMODE_RX_PROMISC);
			} else
#endif
				xn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xn_stop(sc);
			}
		}
		sc->xn_if_flags = ifp->if_flags;
		XN_UNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
				    | CSUM_IP | CSUM_TSO);
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
				    | CSUM_IP);
			}
		}
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			if (IFCAP_TSO4 & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			} else {
				DPRINTK("Xen requires tx checksum offload"
				    " be enabled to use TSO\n");
				error = EINVAL;
			}
		}
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
		}
#endif
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#ifdef notyet
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			XN_LOCK(sc);
			xn_setmulti(sc);
			XN_UNLOCK(sc);
			error = 0;
		}
#endif
		/* FALLTHROUGH */
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = EINVAL;
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}
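/*
 * Example (illustrative): the SIOCSIFCAP handling above is what runs
 * when capabilities are toggled from userland, e.g.:
 *
 *	ifconfig xn0 -txcsum	# also drops TSO4, per the mask logic
 *	ifconfig xn0 lro
 *
 * Note that enabling TSO4 requires TX checksum offload to already be
 * enabled; otherwise EINVAL is returned.
 */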
static void
xn_stop(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	callout_stop(&sc->xn_stat_ch);

	xn_free_rx_ring(sc);
	xn_free_tx_ring(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/* START of Xenolinux helper functions adapted to FreeBSD */
int
network_connect(struct netfront_info *np)
{
	int i, requeue_idx, error;
	grant_ref_t ref;
	netif_rx_request_t *req;
	u_int feature_rx_copy, feature_rx_flip;

	error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
	if (error)
		feature_rx_copy = 0;
	error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-flip", NULL, "%u", &feature_rx_flip);
	if (error)
		feature_rx_flip = 1;

	/*
	 * Copy packets on receive path if:
	 *  (a) This was requested by user, and the backend supports it; or
	 *  (b) Flipping was requested, but this is unsupported by the backend.
	 */
	np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
	    (MODPARM_rx_flip && !feature_rx_flip));

	XN_LOCK(np);
	/* Recovery procedure: */
	error = talk_to_backend(np->xbdev, np);
	if (error) {
		XN_UNLOCK(np);
		return (error);
	}

	/* Step 1: Reinitialise variables. */
	netif_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		struct mbuf *m;
		u_long pfn;

		if (np->rx_mbufs[i] == NULL)
			continue;

		m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);
		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;

		if (!np->copying_receiver) {
			gnttab_grant_foreign_transfer_ref(ref,
			    xenbus_get_otherend_id(np->xbdev),
			    pfn);
		} else {
			gnttab_grant_foreign_access_ref(ref,
			    xenbus_get_otherend_id(np->xbdev),
			    PFNTOMFN(pfn), 0);
		}
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netfront_carrier_on(np);
	notify_remote_via_irq(np->irq);
	XN_TX_LOCK(np);
	xn_txeof(np);
	XN_TX_UNLOCK(np);
	network_alloc_rx_buffers(np);
	XN_UNLOCK(np);

	return (0);
}
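/*
 * Receive-mode decision table (derived from the expression above, with
 * the FreeBSD defaults MODPARM_rx_copy = 1 and MODPARM_rx_flip = 0):
 * copying is used whenever the backend advertises feature-rx-copy;
 * otherwise copying_receiver is 0 and the driver falls back to page
 * flipping.  Case (b) can only fire when flipping was explicitly
 * requested via the CONFIG_XEN module parameters.
 */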
*/ 1890 for (i = 0; i <= NET_TX_RING_SIZE; i++) { 1891 np->tx_mbufs[i] = (void *) ((u_long) i+1); 1892 np->grant_tx_ref[i] = GRANT_INVALID_REF; 1893 } 1894 for (i = 0; i <= NET_RX_RING_SIZE; i++) { 1895 np->rx_mbufs[i] = NULL; 1896 np->grant_rx_ref[i] = GRANT_INVALID_REF; 1897 } 1898 /* A grant for every tx ring slot */ 1899 if (gnttab_alloc_grant_references(TX_MAX_TARGET, 1900 &np->gref_tx_head) < 0) { 1901 printf("#### netfront can't alloc tx grant refs\n"); 1902 err = ENOMEM; 1903 goto exit; 1904 } 1905 /* A grant for every rx ring slot */ 1906 if (gnttab_alloc_grant_references(RX_MAX_TARGET, 1907 &np->gref_rx_head) < 0) { 1908 printf("#### netfront can't alloc rx grant refs\n"); 1909 gnttab_free_grant_references(np->gref_tx_head); 1910 err = ENOMEM; 1911 goto exit; 1912 } 1913 1914 err = xen_net_read_mac(dev, np->mac); 1915 if (err) { 1916 xenbus_dev_fatal(dev, err, "parsing %s/mac", 1917 xenbus_get_node(dev)); 1918 goto out; 1919 } 1920 1921 /* Set up ifnet structure */ 1922 ifp = np->xn_ifp = if_alloc(IFT_ETHER); 1923 ifp->if_softc = np; 1924 if_initname(ifp, "xn", device_get_unit(dev)); 1925 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1926 ifp->if_ioctl = xn_ioctl; 1927 ifp->if_output = ether_output; 1928 ifp->if_start = xn_start; 1929#ifdef notyet 1930 ifp->if_watchdog = xn_watchdog; 1931#endif 1932 ifp->if_init = xn_ifinit; 1933 ifp->if_mtu = ETHERMTU; 1934 ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1; 1935 1936 ifp->if_hwassist = XN_CSUM_FEATURES; 1937 ifp->if_capabilities = IFCAP_HWCSUM; 1938#if __FreeBSD_version >= 700000 1939 ifp->if_capabilities |= IFCAP_TSO4; 1940 if (xn_enable_lro) { 1941 int err = tcp_lro_init(&np->xn_lro); 1942 if (err) { 1943 device_printf(dev, "LRO initialization failed\n"); 1944 goto exit; 1945 } 1946 np->xn_lro.ifp = ifp; 1947 ifp->if_capabilities |= IFCAP_LRO; 1948 } 1949#endif 1950 ifp->if_capenable = ifp->if_capabilities; 1951 1952 ether_ifattach(ifp, np->mac); 1953 callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE); 1954 netfront_carrier_off(np); 1955 1956 return (0); 1957 1958exit: 1959 gnttab_free_grant_references(np->gref_tx_head); 1960out: 1961 panic("do something smart"); 1962 1963} 1964 1965/** 1966 * Handle the change of state of the backend to Closing. We must delete our 1967 * device-layer structures now, to ensure that writes are flushed through to 1968 * the backend. Once is this done, we can switch to Closed in 1969 * acknowledgement. 
/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
#if 0
static void
netfront_closing(device_t dev)
{
#if 0
	struct netfront_info *info = dev->dev_driver_data;

	DPRINTK("netfront_closing: %s removed\n", dev->nodename);

	close_netdev(info);
#endif
	xenbus_switch_state(dev, XenbusStateClosed);
}
#endif

static int
netfront_detach(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	DPRINTK("%s\n", xenbus_get_node(dev));

	netif_free(info);

	return 0;
}


static void
netif_free(struct netfront_info *info)
{
	netif_disconnect_backend(info);
#if 0
	close_netdev(info);
#endif
}

static void
netif_disconnect_backend(struct netfront_info *info)
{
	XN_RX_LOCK(info);
	XN_TX_LOCK(info);
	netfront_carrier_off(info);
	XN_TX_UNLOCK(info);
	XN_RX_UNLOCK(info);

	end_access(info->tx_ring_ref, info->tx.sring);
	end_access(info->rx_ring_ref, info->rx.sring);
	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;

	if (info->irq)
		unbind_from_irqhandler(info->irq);

	info->irq = 0;
}


static void
end_access(int ref, void *page)
{
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, page);
}

/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netfront_probe),
	DEVMETHOD(device_attach,	netfront_attach),
	DEVMETHOD(device_detach,	netfront_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	netfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_backend_changed, netfront_backend_changed),

	{ 0, 0 }
};

static driver_t netfront_driver = {
	"xn",
	netfront_methods,
	sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbus, netfront_driver, netfront_devclass, 0, 0);