/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/xen/netfront/netfront.c 316170 2017-03-29 17:11:41Z ngie $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>	/* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/intr_machdep.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include <machine/xen/xenvar.h>

#include <dev/xen/netfront/mbufq.h>

#include "xenbus_if.h"

/* Features supported by all backends.  TSO and LRO can be negotiated. */
#define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
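/*
 * Illustrative note (an editorial addition, not in the original source):
 * __RING_SIZE() computes how many request/response slots fit in one page
 * of shared ring after the ring header, rounded down to a power of two.
 * With the usual 4 KB page size this is assumed to work out to 256 slots
 * for both the netif tx and rx rings, and the power-of-two property is
 * what the index masking in xennet_rxidx() below relies on.
 */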
#if __FreeBSD_version >= 700000
/*
 * Should the driver do LRO on the RX end?  This can be toggled on the fly,
 * but the interface must be reset (down/up) for it to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
#else

#define IFCAP_TSO4	0
#define CSUM_TSO	0

#endif

#ifdef CONFIG_XEN
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif

/**
 * \brief The maximum allowed data fragments in a single transmit
 *        request.
 *
 * This limit is imposed by the backend driver.  We assume here that
 * we are dealing with a Linux driver domain and have set our limit
 * to mirror the Linux MAX_SKB_FRAGS constant.  With 4 KB pages this
 * evaluates to 65536 / 4096 + 2 == 18 fragments.
 */
#define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)

#define RX_COPY_THRESHOLD 256

#define net_ratelimit() 0

struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static inline int xn_count_frags(struct mbuf *m);
static int xn_assemble_tx_request(struct netfront_info *sc,
    struct mbuf *m_head);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
static void xn_query_features(struct netfront_info *np);
static int  xn_configure_features(struct netfront_info *np);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

#ifdef notyet
static void netfront_closing(device_t dev);
#endif
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void free_ring(int *ref, void *ring_ptr_ref);

static int  xn_ifmedia_upd(struct ifnet *ifp);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list, int *pages_flipped_p);

#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)
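/*
 * Explanatory note (an editorial addition, not in the original source):
 * under paravirtualized Xen the guest's "physical" frame numbers (PFNs)
 * are distinct from the machine frame numbers (MFNs) that the hypervisor
 * and the backend deal in.  virt_to_mfn() translates a kernel virtual
 * address to an MFN via vtomach(), and INVALID_P2M_ENTRY marks a PFN that
 * currently has no backing machine frame, as happens while a receive page
 * is being "flipped" over to the backend.
 */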
/*
 * Mbuf pointers.  We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
	struct mbuf	*xn_tx_chain[NET_TX_RING_SIZE+1];
	int		xn_tx_chain_cnt;
	struct mbuf	*xn_rx_chain[NET_RX_RING_SIZE+1];
};

struct net_device_stats
{
	u_long	rx_packets;	/* total packets received	*/
	u_long	tx_packets;	/* total packets transmitted	*/
	u_long	rx_bytes;	/* total bytes received		*/
	u_long	tx_bytes;	/* total bytes transmitted	*/
	u_long	rx_errors;	/* bad packets received		*/
	u_long	tx_errors;	/* packet transmit problems	*/
	u_long	rx_dropped;	/* no space in linux buffers	*/
	u_long	tx_dropped;	/* no space available in linux	*/
	u_long	multicast;	/* multicast packets received	*/
	u_long	collisions;

	/* detailed rx_errors: */
	u_long	rx_length_errors;
	u_long	rx_over_errors;		/* receiver ring buff overflow	*/
	u_long	rx_crc_errors;		/* recv'd pkt with crc error	*/
	u_long	rx_frame_errors;	/* recv'd frame alignment error	*/
	u_long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	u_long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	u_long	tx_aborted_errors;
	u_long	tx_carrier_errors;
	u_long	tx_fifo_errors;
	u_long	tx_heartbeat_errors;
	u_long	tx_window_errors;

	/* for cslip etc */
	u_long	rx_compressed;
	u_long	tx_compressed;
};

struct netfront_info {
	struct ifnet *xn_ifp;
#if __FreeBSD_version >= 700000
	struct lro_ctrl xn_lro;
#endif

	struct net_device_stats stats;
	u_int tx_full;

	netif_tx_front_ring_t tx;
	netif_rx_front_ring_t rx;

	struct mtx   tx_lock;
	struct mtx   rx_lock;
	struct mtx   sc_lock;

	xen_intr_handle_t xen_intr_handle;
	u_int copying_receiver;
	u_int carrier;
	u_int maxfrags;
	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
	int rx_min_target;
	int rx_max_target;
	int rx_target;

	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];

	device_t		xbdev;
	int			tx_ring_ref;
	int			rx_ring_ref;
	uint8_t			mac[ETHER_ADDR_LEN];
	struct xn_chain_data	xn_cdata;	/* mbufs */
	struct mbuf_head	xn_rx_batch;	/* head of the batch queue */

	int			xn_if_flags;
	struct callout		xn_stat_ch;

	u_long			rx_pfn_array[NET_RX_RING_SIZE];
	multicall_entry_t	rx_mcl[NET_RX_RING_SIZE+1];
	mmu_update_t		rx_mmu[NET_RX_RING_SIZE];
	struct ifmedia		sc_media;

	bool			xn_resume;
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain

#define XN_LOCK_INIT(_sc, _name) \
	mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
	mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
	mtx_init(&(_sc)->sc_lock, #_name"_sc", "netfront softc lock", MTX_DEF)

#define XN_RX_LOCK(_sc)		mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)	mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)		mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)	mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)		mtx_lock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_lock, MA_OWNED);
#define XN_RX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->rx_lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->tx_lock, MA_OWNED);
#define XN_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->rx_lock); \
				mtx_destroy(&(_sc)->tx_lock); \
				mtx_destroy(&(_sc)->sc_lock);

struct netfront_rx_info {
	struct netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)	((netif)->carrier = 1)
#define netfront_carrier_off(netif)	((netif)->carrier = 0)
#define netfront_carrier_ok(netif)	((netif)->carrier)

/* Access macros for acquiring/freeing slots in xn_free_{tx,rx}_idxs[]. */

static inline void
add_id_to_freelist(struct mbuf **list, uintptr_t id)
{
	KASSERT(id != 0,
	    ("%s: the head item (0) must always be free.", __func__));
	list[id] = list[0];
	list[0]  = (struct mbuf *)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
	uintptr_t id;

	id = (uintptr_t)list[0];
	KASSERT(id != 0,
	    ("%s: the head item (0) must always remain free.", __func__));
	list[0] = list[id];
	return (id);
}
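/*
 * Illustrative example (an editorial addition, not in the original source):
 * the free list is threaded through the mbuf pointer array itself, storing
 * small integer indices where pointers would otherwise live.  Starting from
 * a free chain 0 -> 3 -> 7 (i.e. list[0] == 3, list[3] == 7):
 *
 *	id = get_id_from_freelist(list);	// returns 3; head becomes 7
 *	list[id] = m;				// slot 3 now holds a real mbuf
 *	...
 *	add_id_to_freelist(list, 3);		// 3 is pushed back on the head
 *
 * This encoding works because no valid kernel pointer is a small integer,
 * which is also what the "(uintptr_t)m <= NET_TX_RING_SIZE" tests elsewhere
 * in this file rely on.
 */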
static inline int
xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct mbuf *m;

	m = np->rx_mbufs[i];
	np->rx_mbufs[i] = NULL;
	return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
	np->grant_rx_ref[i] = GRANT_REF_INVALID;
	return ref;
}

#define IPRINTK(fmt, args...) \
	printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
	printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
	printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/**
 * Read the 'mac' node at the given device's node in the store, and parse that
 * as colon-separated octets, placing the result in the given mac array.  mac
 * must be a preallocated array of length ETHER_ADDR_LEN.  Return 0 on
 * success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	int error, i;
	char *s, *e, *macstr;
	const char *path;

	path = xenbus_get_node(dev);
	error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	if (error == ENOENT) {
		/*
		 * Deal with missing mac XenStore nodes on devices with
		 * HVM emulation (the 'ioemu' configuration attribute)
		 * enabled.
		 *
		 * The HVM emulator may execute in a stub device model
		 * domain which lacks the permission, only given to Dom0,
		 * to update the guest's XenStore tree.  For this reason,
		 * the HVM emulator doesn't even attempt to write the
		 * front-side mac node, even when operating in Dom0.
		 * However, there should always be a mac listed in the
		 * backend tree.  Fall back to this version if our query
		 * of the front side XenStore location doesn't find
		 * anything.
		 */
		path = xenbus_get_otherend_path(dev);
		error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	}
	if (error != 0) {
		xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
		return (error);
	}

	s = macstr;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		if (s == e || (e[0] != ':' && e[0] != 0)) {
			free(macstr, M_XENBUS);
			return (ENOENT);
		}
		s = &e[1];
	}
	free(macstr, M_XENBUS);
	return (0);
}
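/*
 * Usage note (an editorial addition, not in the original source): Xen
 * toolstacks conventionally write the 'mac' node in colon-separated form,
 * e.g. "00:16:3e:5a:91:02" (00:16:3e being the OUI reserved for Xen).  The
 * strtoul() walk above turns that string into the six bytes
 * { 0x00, 0x16, 0x3e, 0x5a, 0x91, 0x02 }, rejecting anything that is not
 * exactly six colon-separated hex octets.
 */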
/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

#ifdef XENHVM
	if (xen_disable_pv_nics != 0)
		return (ENXIO);
#endif

	if (!strcmp(xenbus_get_type(dev), "vif")) {
		device_set_desc(dev, "Virtual Network Interface");
		return (0);
	}

	return (ENXIO);
}

static int
netfront_attach(device_t dev)
{
	int err;

	err = create_netdev(dev);
	if (err) {
		xenbus_dev_fatal(dev, err, "creating netdev");
		return (err);
	}

#if __FreeBSD_version >= 700000
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_lro", CTLFLAG_RW,
	    &xn_enable_lro, 0, "Large Receive Offload");
#endif

	return (0);
}

static int
netfront_suspend(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	XN_RX_LOCK(info);
	XN_TX_LOCK(info);
	netfront_carrier_off(info);
	XN_TX_UNLOCK(info);
	XN_RX_UNLOCK(info);
	return (0);
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	if (xen_suspend_cancelled) {
		XN_RX_LOCK(info);
		XN_TX_LOCK(info);
		netfront_carrier_on(info);
		XN_TX_UNLOCK(info);
		XN_RX_UNLOCK(info);
		return (0);
	}

	info->xn_resume = true;
	netif_disconnect_backend(info);
	return (0);
}

/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
	const char *message;
	struct xs_transaction xst;
	const char *node = xenbus_get_node(dev);
	int err;

	err = xen_net_read_mac(dev, info->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_device(dev, info);
	if (err)
		goto out;

 again:
	err = xs_transaction_start(&xst);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}
	err = xs_printf(xst, node, "tx-ring-ref", "%u",
	    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "rx-ring-ref", "%u",
	    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node,
	    "event-channel", "%u",
	    xen_intr_port(info->xen_intr_handle));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "request-rx-copy", "%u",
	    info->copying_receiver);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}
#if __FreeBSD_version >= 700000
	err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}
#endif

	err = xs_transaction_end(xst, 0);
	if (err) {
		if (err == EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xs_transaction_end(xst, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	netif_free(info);
 out:
	return err;
}
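/*
 * Illustrative note (an editorial addition, not in the original source):
 * after a successful talk_to_backend() the frontend's XenStore directory,
 * e.g. /local/domain/<domid>/device/vif/0, is expected to contain roughly:
 *
 *	tx-ring-ref = "<grant ref of the tx shared ring page>"
 *	rx-ring-ref = "<grant ref of the rx shared ring page>"
 *	event-channel = "<local event channel port>"
 *	request-rx-copy = "0" or "1"
 *	feature-rx-notify = "1"
 *	feature-sg = "1"
 *	feature-gso-tcpv4 = "1"
 *
 * all written in a single transaction, so the backend never observes a
 * half-populated set of nodes.
 */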
static int
setup_device(device_t dev, struct netfront_info *info)
{
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	int error;
	struct ifnet *ifp;

	ifp = info->xn_ifp;

	info->tx_ring_ref = GRANT_REF_INVALID;
	info->rx_ring_ref = GRANT_REF_INVALID;
	info->rx.sring = NULL;
	info->tx.sring = NULL;

	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!txs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
	if (error)
		goto fail;

	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!rxs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
	if (error)
		goto fail;

	error = xen_intr_alloc_and_bind_local_port(dev,
	    xenbus_get_otherend_id(dev), /*filter*/NULL, xn_intr, info,
	    INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &info->xen_intr_handle);

	if (error) {
		xenbus_dev_fatal(dev, error,
		    "xen_intr_alloc_and_bind_local_port failed");
		goto fail;
	}

	return (0);

 fail:
	netif_free(info);
	return (error);
}

#ifdef INET
/**
 * If this interface has an ipv4 address, send an arp for it.  This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	ifp = info->xn_ifp;
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			arp_ifinit(ifp, ifa);
		}
	}
}
#endif

/**
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
	struct netfront_info *sc = device_get_softc(dev);

	DPRINTK("newstate=%d\n", newstate);

	switch (newstate) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
		break;
	case XenbusStateInitWait:
		if (xenbus_get_state(dev) != XenbusStateInitialising)
			break;
		if (network_connect(sc) != 0)
			break;
		xenbus_set_state(dev, XenbusStateConnected);
		break;
	case XenbusStateClosing:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	case XenbusStateConnected:
#ifdef INET
		netfront_send_fake_arp(dev, sc);
#endif
		break;
	}
}

static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		if (sc->xn_cdata.rx_mbufs[i] != NULL) {
			m_freem(sc->rx_mbufs[i]);
			sc->rx_mbufs[i] = NULL;
		}
	}

	sc->rx.rsp_cons = 0;
	sc->xn_rx_if->req_prod = 0;
	sc->xn_rx_if->event = sc->rx.rsp_cons;
#endif
}

static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		if (sc->tx_mbufs[i] != NULL) {
			m_freem(sc->tx_mbufs[i]);
			sc->xn_cdata.xn_tx_chain[i] = NULL;
		}
	}

	return;
#endif
}

/**
 * \brief Verify that there is sufficient space in the Tx ring
 *        buffer for a maximally sized request to be enqueued.
 *
 * A transmit request requires a transmit descriptor for each packet
 * fragment, plus up to 2 entries for "options" (e.g. TSO).
 */
static inline int
xn_tx_slot_available(struct netfront_info *np)
{
	return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
}
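/*
 * Worked example (an editorial addition, not in the original source): with
 * 4 KB pages MAX_TX_REQ_FRAGS is 18, so the test above requires strictly
 * more than 20 free request slots before xn_start_locked() will dequeue
 * another packet.  On a 256-slot tx ring that reserves room for a full
 * scatter/gather packet plus its optional TSO extra-info slot, with a
 * little slack.
 */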
791 */ 792 if (((uintptr_t)m) <= NET_TX_RING_SIZE) 793 continue; 794 gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); 795 gnttab_release_grant_reference(&np->gref_tx_head, 796 np->grant_tx_ref[i]); 797 np->grant_tx_ref[i] = GRANT_REF_INVALID; 798 add_id_to_freelist(np->tx_mbufs, i); 799 np->xn_cdata.xn_tx_chain_cnt--; 800 if (np->xn_cdata.xn_tx_chain_cnt < 0) { 801 panic("%s: tx_chain_cnt must be >= 0", __func__); 802 } 803 m_free(m); 804 } 805} 806 807static void 808netif_release_rx_bufs_copy(struct netfront_info *np) 809{ 810 struct mbuf *m; 811 grant_ref_t ref; 812 unsigned int i, busy, inuse; 813 814 XN_RX_LOCK(np); 815 816 for (busy = inuse = i = 0; i < NET_RX_RING_SIZE; i++) { 817 ref = np->grant_rx_ref[i]; 818 819 if (ref == GRANT_REF_INVALID) 820 continue; 821 822 inuse++; 823 824 m = np->rx_mbufs[i]; 825 826 if (!gnttab_end_foreign_access_ref(ref)) { 827 busy++; 828 continue; 829 } 830 831 gnttab_release_grant_reference(&np->gref_rx_head, ref); 832 np->grant_rx_ref[i] = GRANT_REF_INVALID; 833 add_id_to_freelist(np->rx_mbufs, i); 834 835 m_freem(m); 836 } 837 838 if (busy != 0) 839 device_printf(np->xbdev, 840 "Unable to release %u of %u in use grant references out of %zu total.\n", 841 busy, inuse, NET_RX_RING_SIZE); 842 843 XN_RX_UNLOCK(np); 844} 845 846static void 847network_alloc_rx_buffers(struct netfront_info *sc) 848{ 849 int otherend_id = xenbus_get_otherend_id(sc->xbdev); 850 unsigned short id; 851 struct mbuf *m_new; 852 int i, batch_target, notify; 853 RING_IDX req_prod; 854 struct xen_memory_reservation reservation; 855 grant_ref_t ref; 856 int nr_flips; 857 netif_rx_request_t *req; 858 vm_offset_t vaddr; 859 u_long pfn; 860 861 req_prod = sc->rx.req_prod_pvt; 862 863 if (__predict_false(sc->carrier == 0)) 864 return; 865 866 /* 867 * Allocate mbufs greedily, even though we batch updates to the 868 * receive ring. This creates a less bursty demand on the memory 869 * allocator, and so should reduce the chance of failed allocation 870 * requests both for ourself and for other kernel subsystems. 871 * 872 * Here we attempt to maintain rx_target buffers in flight, counting 873 * buffers that we have yet to process in the receive ring. 874 */ 875 batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons); 876 for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) { 877 MGETHDR(m_new, M_NOWAIT, MT_DATA); 878 if (m_new == NULL) { 879 printf("%s: MGETHDR failed\n", __func__); 880 goto no_mbuf; 881 } 882 883 m_cljget(m_new, M_NOWAIT, MJUMPAGESIZE); 884 if ((m_new->m_flags & M_EXT) == 0) { 885 printf("%s: m_cljget failed\n", __func__); 886 m_freem(m_new); 887 888no_mbuf: 889 if (i != 0) 890 goto refill; 891 /* 892 * XXX set timer 893 */ 894 break; 895 } 896 m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE; 897 898 /* queue the mbufs allocated */ 899 mbufq_tail(&sc->xn_rx_batch, m_new); 900 } 901 902 /* 903 * If we've allocated at least half of our target number of entries, 904 * submit them to the backend - we have enough to make the overhead 905 * of submission worthwhile. Otherwise wait for more mbufs and 906 * request entries to become available. 907 */ 908 if (i < (sc->rx_target/2)) { 909 if (req_prod >sc->rx.sring->req_prod) 910 goto push; 911 return; 912 } 913 914 /* 915 * Double floating fill target if we risked having the backend 916 * run out of empty buffers for receive traffic. We define "running 917 * low" as having less than a fourth of our target buffers free 918 * at the time we refilled the queue. 
919 */ 920 if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) { 921 sc->rx_target *= 2; 922 if (sc->rx_target > sc->rx_max_target) 923 sc->rx_target = sc->rx_max_target; 924 } 925 926refill: 927 for (nr_flips = i = 0; ; i++) { 928 if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL) 929 break; 930 931 m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)( 932 vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT); 933 934 id = xennet_rxidx(req_prod + i); 935 936 KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain")); 937 sc->rx_mbufs[id] = m_new; 938 939 ref = gnttab_claim_grant_reference(&sc->gref_rx_head); 940 KASSERT(ref != GNTTAB_LIST_END, 941 ("reserved grant references exhuasted")); 942 sc->grant_rx_ref[id] = ref; 943 944 vaddr = mtod(m_new, vm_offset_t); 945 pfn = vtophys(vaddr) >> PAGE_SHIFT; 946 req = RING_GET_REQUEST(&sc->rx, req_prod + i); 947 948 if (sc->copying_receiver == 0) { 949 gnttab_grant_foreign_transfer_ref(ref, 950 otherend_id, pfn); 951 sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn); 952 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 953 /* Remove this page before passing 954 * back to Xen. 955 */ 956 set_phys_to_machine(pfn, INVALID_P2M_ENTRY); 957 MULTI_update_va_mapping(&sc->rx_mcl[i], 958 vaddr, 0, 0); 959 } 960 nr_flips++; 961 } else { 962 gnttab_grant_foreign_access_ref(ref, 963 otherend_id, 964 PFNTOMFN(pfn), 0); 965 } 966 req->id = id; 967 req->gref = ref; 968 969 sc->rx_pfn_array[i] = 970 vtomach(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT; 971 } 972 973 KASSERT(i, ("no mbufs processed")); /* should have returned earlier */ 974 KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed")); 975 /* 976 * We may have allocated buffers which have entries outstanding 977 * in the page * update queue -- make sure we flush those first! 978 */ 979 PT_UPDATES_FLUSH(); 980 if (nr_flips != 0) { 981#ifdef notyet 982 /* Tell the ballon driver what is going on. */ 983 balloon_update_driver_allowance(i); 984#endif 985 set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array); 986 reservation.nr_extents = i; 987 reservation.extent_order = 0; 988 reservation.address_bits = 0; 989 reservation.domid = DOMID_SELF; 990 991 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 992 /* After all PTEs have been zapped, flush the TLB. */ 993 sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = 994 UVMF_TLB_FLUSH|UVMF_ALL; 995 996 /* Give away a batch of pages. */ 997 sc->rx_mcl[i].op = __HYPERVISOR_memory_op; 998 sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation; 999 sc->rx_mcl[i].args[1] = (u_long)&reservation; 1000 /* Zap PTEs and give away pages in one big multicall. */ 1001 (void)HYPERVISOR_multicall(sc->rx_mcl, i+1); 1002 1003 if (__predict_false(sc->rx_mcl[i].result != i || 1004 HYPERVISOR_memory_op(XENMEM_decrease_reservation, 1005 &reservation) != i)) 1006 panic("%s: unable to reduce memory " 1007 "reservation\n", __func__); 1008 } 1009 } else { 1010 wmb(); 1011 } 1012 1013 /* Above is a suitable barrier to ensure backend will see requests. 
	sc->rx.req_prod_pvt = req_prod + i;
push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
	if (notify)
		xen_intr_signal(sc->xen_intr_handle);
}

static void
xn_rxeof(struct netfront_info *np)
{
	struct ifnet *ifp;
#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
	struct lro_ctrl *lro = &np->xn_lro;
	struct lro_entry *queued;
#endif
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	multicall_entry_t *mcl;
	struct mbuf *m;
	struct mbuf_head rxq, errq;
	int err, pages_flipped = 0, work_to_do;

	do {
		XN_RX_LOCK_ASSERT(np);
		if (!netfront_carrier_ok(np))
			return;

		mbufq_init(&errq);
		mbufq_init(&rxq);

		ifp = np->xn_ifp;

		rp = np->rx.sring->rsp_prod;
		rmb();	/* Ensure we see queued responses up to 'rp'. */

		i = np->rx.rsp_cons;
		while (i != rp) {
			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
			memset(extras, 0, sizeof(rinfo.extras));

			m = NULL;
			err = xennet_get_responses(np, &rinfo, rp, &i, &m,
			    &pages_flipped);

			if (__predict_false(err)) {
				if (m)
					mbufq_tail(&errq, m);
				np->stats.rx_errors++;
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			if (rx->flags & NETRXF_data_validated) {
				/* Tell the stack the checksums are okay */
				/*
				 * XXX this isn't necessarily the case - need to add
				 * check
				 */
				m->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
				    | CSUM_PSEUDO_HDR);
				m->m_pkthdr.csum_data = 0xffff;
			}

			np->stats.rx_packets++;
			np->stats.rx_bytes += m->m_pkthdr.len;

			mbufq_tail(&rxq, m);
			np->rx.rsp_cons = i;
		}

		if (pages_flipped) {
			/* Some pages are no longer absent... */
#ifdef notyet
			balloon_update_driver_allowance(-pages_flipped);
#endif
			/*
			 * Do all the remapping work, and M->P updates, in
			 * one big hypercall.
			 */
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				mcl = np->rx_mcl + pages_flipped;
				mcl->op = __HYPERVISOR_mmu_update;
				mcl->args[0] = (u_long)np->rx_mmu;
				mcl->args[1] = pages_flipped;
				mcl->args[2] = 0;
				mcl->args[3] = DOMID_SELF;
				(void)HYPERVISOR_multicall(np->rx_mcl,
				    pages_flipped + 1);
			}
		}

		while ((m = mbufq_dequeue(&errq)))
			m_freem(m);
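		/*
		 * Editorial note (not in the original source): the loop
		 * below hands each mbuf to the network stack with the RX
		 * lock dropped.  Holding a driver mutex across
		 * (*ifp->if_input)() would risk lock recursion or a
		 * lock-order reversal if the stack re-enters the driver
		 * (for instance to transmit), which is presumably why the
		 * lock is released and reacquired around every packet.
		 */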
		/*
		 * Process all the mbufs after the remapping is complete.
		 * Break the mbuf chain first though.
		 */
		while ((m = mbufq_dequeue(&rxq)) != NULL) {
			ifp->if_ipackets++;

			/*
			 * Do we really need to drop the rx lock?
			 */
			XN_RX_UNLOCK(np);
#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
			/* Use LRO if possible */
			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
				/*
				 * If LRO fails, pass up to the stack
				 * directly.
				 */
				(*ifp->if_input)(ifp, m);
			}
#else
			(*ifp->if_input)(ifp, m);
#endif
			XN_RX_LOCK(np);
		}

		np->rx.rsp_cons = i;

#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
		/*
		 * Flush any outstanding LRO work
		 */
		while (!SLIST_EMPTY(&lro->lro_active)) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
#endif

#if 0
		/* If we get a callback with very few responses, reduce fill target. */
		/* NB. Note exponential increase, linear decrease. */
		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
		    ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
			np->rx_target = np->rx_min_target;
#endif

		network_alloc_rx_buffers(np);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
	} while (work_to_do);
}

static void
xn_txeof(struct netfront_info *np)
{
	RING_IDX i, prod;
	unsigned short id;
	struct ifnet *ifp;
	netif_tx_response_t *txr;
	struct mbuf *m;

	XN_TX_LOCK_ASSERT(np);

	if (!netfront_carrier_ok(np))
		return;

	ifp = np->xn_ifp;

	do {
		prod = np->tx.sring->rsp_prod;
		rmb();	/* Ensure we see responses up to 'prod'. */

		for (i = np->tx.rsp_cons; i != prod; i++) {
			txr = RING_GET_RESPONSE(&np->tx, i);
			if (txr->status == NETIF_RSP_NULL)
				continue;

			if (txr->status != NETIF_RSP_OKAY) {
				printf("%s: WARNING: response is %d!\n",
				    __func__, txr->status);
			}
			id = txr->id;
			m = np->tx_mbufs[id];
			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
			    ("mbuf already on the free list, but we're "
			    "trying to free it again!"));
			M_ASSERTVALID(m);

			/*
			 * Increment packet count if this is the last
			 * mbuf of the chain.
			 */
			if (!m->m_next)
				ifp->if_opackets++;
			if (__predict_false(gnttab_query_foreign_access(
			    np->grant_tx_ref[id]) != 0)) {
				panic("%s: grant id %u still in use by the "
				    "backend", __func__, id);
			}
			gnttab_end_foreign_access_ref(
			    np->grant_tx_ref[id]);
			gnttab_release_grant_reference(
			    &np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_REF_INVALID;

			np->tx_mbufs[id] = NULL;
			add_id_to_freelist(np->tx_mbufs, id);
			np->xn_cdata.xn_tx_chain_cnt--;
			m_free(m);
			/*
			 * Only mark the queue active if we've freed up at
			 * least one slot to try.
			 */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of
		 * tx_cons.  Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending.  Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
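		/*
		 * Worked example (an editorial addition, not in the
		 * original source): with rsp_prod == 100 consumed and
		 * req_prod == 120 outstanding, the assignment below sets
		 * rsp_event to 100 + (120 - 100)/2 + 1 == 111, i.e. the
		 * backend raises the next event roughly halfway through the
		 * remaining in-flight requests rather than once per
		 * response.
		 */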
		np->tx.sring->rsp_event =
		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;

		mb();
	} while (prod != np->tx.sring->rsp_prod);

	if (np->tx_full &&
	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		np->tx_full = 0;
#if 0
		if (np->user_state == UST_OPEN)
			netif_wake_queue(dev);
#endif
	}
}

static void
xn_intr(void *xsc)
{
	struct netfront_info *np = xsc;
	struct ifnet *ifp = np->xn_ifp;

#if 0
	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
	    likely(netfront_carrier_ok(np)) &&
	    ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;
#endif
	if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
		XN_TX_LOCK(np);
		xn_txeof(np);
		XN_TX_UNLOCK(np);
	}

	XN_RX_LOCK(np);
	xn_rxeof(np);
	XN_RX_UNLOCK(np);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		xn_start(ifp);
}

static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
    grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
	np->rx_mbufs[new] = m;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}
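/*
 * Editorial note (not in the original source): some receive "responses"
 * carry no packet data at all.  When a response has NETRXF_extra_info set,
 * the following ring slot(s) hold netif_extra_info records (for example GSO
 * metadata) rather than data fragments.  xennet_get_extras() below consumes
 * those slots and recycles their mbufs and grant references via
 * xennet_move_rx_slot().
 */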
static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
	struct netif_extra_info *extra;

	int err = 0;

	do {
		struct mbuf *m;
		grant_ref_t ref;

		if (__predict_false(*cons + 1 == rp)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Missing extra info\n");
#endif
			err = EINVAL;
			break;
		}

		extra = (struct netif_extra_info *)
		    RING_GET_RESPONSE(&np->rx, ++(*cons));

		if (__predict_false(!extra->type ||
		    extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Invalid extra type: %d\n",
				    extra->type);
#endif
			err = EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
		}

		m = xennet_get_rx_mbuf(np, *cons);
		ref = xennet_get_rx_ref(np, *cons);
		xennet_move_rx_slot(np, m, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return err;
}

static int
xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list,
    int *pages_flipped_p)
{
	int pages_flipped = *pages_flipped_p;
	struct mmu_update *mmu;
	struct multicall_entry *mcl;
	struct netif_rx_response *rx = &rinfo->rx;
	struct netif_extra_info *extras = rinfo->extras;
	struct mbuf *m, *m0, *m_prev;
	grant_ref_t ref = xennet_get_rx_ref(np, *cons);
	RING_IDX ref_cons = *cons;
	int frags = 1;
	int err = 0;
	u_long ret;

	m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp, cons);
	}

	if (m0 != NULL) {
		m0->m_pkthdr.len = 0;
		m0->m_next = NULL;
	}

	for (;;) {
		u_long mfn;

#if 0
		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
		    rx->status, rx->offset, frags);
#endif
		if (__predict_false(rx->status < 0 ||
		    rx->offset + rx->status > PAGE_SIZE)) {
#if 0
			if (net_ratelimit())
				WPRINTK("rx->offset: %x, size: %u\n",
				    rx->offset, rx->status);
#endif
			xennet_move_rx_slot(np, m, ref);
			if (m0 == m)
				m0 = NULL;
			m = NULL;
			err = EINVAL;
			goto next_skip_queue;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver.  In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_REF_INVALID) {
#if 0
			if (net_ratelimit())
				WPRINTK("Bad rx response id %d.\n", rx->id);
#endif
			printf("%s: Bad rx response id %d.\n", __func__, rx->id);
			err = EINVAL;
			goto next;
		}

		if (!np->copying_receiver) {
			/*
			 * Memory pressure, insufficient buffer
			 * headroom, ...
			 */
			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
				    rx->id, rx->status);
				xennet_move_rx_slot(np, m, ref);
				err = ENOMEM;
				goto next;
			}

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Remap the page. */
				void *vaddr = mtod(m, void *);
				uint32_t pfn;

				mcl = np->rx_mcl + pages_flipped;
				mmu = np->rx_mmu + pages_flipped;

				MULTI_update_va_mapping(mcl, (u_long)vaddr,
				    (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW |
				    PG_V | PG_M | PG_A, 0);
				pfn = (uintptr_t)m->m_ext.ext_arg1;
				mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
				    MMU_MACHPHYS_UPDATE;
				mmu->val = pfn;

				set_phys_to_machine(pfn, mfn);
			}
			pages_flipped++;
		} else {
			ret = gnttab_end_foreign_access_ref(ref);
			KASSERT(ret, ("ret != 0"));
		}

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

next:
		if (m == NULL)
			break;

		m->m_len = rx->status;
		m->m_data += rx->offset;
		m0->m_pkthdr.len += rx->status;

next_skip_queue:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (*cons + frags == rp) {
			if (net_ratelimit())
				WPRINTK("Need more frags\n");
			err = ENOENT;
			printf("%s: cons %u frags %u rp %u, not enough frags\n",
			    __func__, *cons, frags, rp);
			break;
		}
		m_prev = m;

		rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
		m = xennet_get_rx_mbuf(np, *cons + frags);

		/*
		 * m_prev can be NULL if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		if (m_prev != NULL)
			m_prev->m_next = m;

		/*
		 * m0 can be NULL if rx->status < 0 or if rx->offset +
		 * rx->status > PAGE_SIZE above.
		 */
		if (m0 == NULL)
			m0 = m;
		m->m_next = NULL;
		ref = xennet_get_rx_ref(np, *cons + frags);
		ref_cons = *cons + frags;
		frags++;
	}
	*list = m0;
	*cons += frags;
	*pages_flipped_p = pages_flipped;

	return (err);
}

static void
xn_tick_locked(struct netfront_info *sc)
{
	XN_RX_LOCK_ASSERT(sc);
	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

	/* XXX placeholder for printing debug information */
}

static void
xn_tick(void *xsc)
{
	struct netfront_info *sc;

	sc = xsc;
	XN_RX_LOCK(sc);
	xn_tick_locked(sc);
	XN_RX_UNLOCK(sc);
}
/**
 * \brief Count the number of fragments in an mbuf chain.
 *
 * Surprisingly, there isn't an M* macro for this.
 */
static inline int
xn_count_frags(struct mbuf *m)
{
	int nfrags;

	for (nfrags = 0; m != NULL; m = m->m_next)
		nfrags++;

	return (nfrags);
}

/**
 * Given an mbuf chain, make sure we have enough room and then push
 * it onto the transmit ring.
 */
static int
xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
{
	struct ifnet *ifp;
	struct mbuf *m;
	u_int nfrags;
	netif_extra_info_t *extra;
	int otherend_id;

	ifp = sc->xn_ifp;

	/*
	 * Defragment the mbuf if necessary.
	 */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether this request is longer than netback
	 * can handle, and try to defrag it.
	 *
	 * It is a bit lame, but the netback driver in Linux can't
	 * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
	 * the Linux network stack.
	 */
	if (nfrags > sc->maxfrags) {
		m = m_defrag(m_head, M_NOWAIT);
		if (!m) {
			/*
			 * Defrag failed, so free the mbuf and
			 * therefore drop the packet.
			 */
			m_freem(m_head);
			return (EMSGSIZE);
		}
		m_head = m;
	}

	/* Determine how many fragments now exist. */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether the defragmented packet has too many
	 * segments for the Linux netback driver.
	 *
	 * The FreeBSD TCP stack, with TSO enabled, can produce a chain
	 * of mbufs longer than Linux can handle.  Make sure we don't
	 * pass a too-long chain over to the other side by dropping the
	 * packet.  It doesn't look like there is currently a way to
	 * tell the TCP stack to generate a shorter chain of packets.
	 */
	if (nfrags > MAX_TX_REQ_FRAGS) {
#ifdef DEBUG
		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
		    "won't be able to handle it, dropping\n",
		    __func__, nfrags, MAX_TX_REQ_FRAGS);
#endif
		m_freem(m_head);
		return (EMSGSIZE);
	}

	/*
	 * This check should be redundant.  We've already verified that we
	 * have enough slots in the ring to handle a packet of maximum
	 * size, and that our packet is less than the maximum size.  Keep
	 * it in here as an assert for now just to make certain that
	 * xn_tx_chain_cnt is accurate.
	 */
	KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
	    ("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
	    "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
	    (int) nfrags, (int) NET_TX_RING_SIZE));
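	/*
	 * Editorial note (not in the original source): each fragment below
	 * follows the same grant life cycle: claim a reference from
	 * gref_tx_head, grant the backend read-only access to the page
	 * holding the fragment, and record the mbuf in tx_mbufs[] under a
	 * freelist id.  The cycle is unwound in xn_txeof() once the
	 * backend's response arrives: end foreign access, release the
	 * reference, and return the id to the freelist.
	 */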
	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	extra = NULL;
	otherend_id = xenbus_get_otherend_id(sc->xbdev);
	for (m = m_head; m; m = m->m_next) {
		netif_tx_request_t *tx;
		uintptr_t id;
		grant_ref_t ref;
		u_long mfn; /* XXX Wrong type? */

		tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
		id = get_id_from_freelist(sc->tx_mbufs);
		if (id == 0)
			panic("%s: was allocated the freelist head!\n",
			    __func__);
		sc->xn_cdata.xn_tx_chain_cnt++;
		if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
			panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
			    __func__);
		sc->tx_mbufs[id] = m;
		tx->id = id;
		ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
		KASSERT((short)ref >= 0, ("Negative ref"));
		mfn = virt_to_mfn(mtod(m, vm_offset_t));
		gnttab_grant_foreign_access_ref(ref, otherend_id,
		    mfn, GNTMAP_readonly);
		tx->gref = sc->grant_tx_ref[id] = ref;
		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
		tx->flags = 0;
		if (m == m_head) {
			/*
			 * The first fragment has the entire packet
			 * size, subsequent fragments have just the
			 * fragment size.  The backend works out the
			 * true size of the first fragment by
			 * subtracting the sizes of the other
			 * fragments.
			 */
			tx->size = m->m_pkthdr.len;

			/*
			 * The first fragment contains the checksum flags
			 * and is optionally followed by extra data for
			 * TSO etc.
			 *
			 * CSUM_TSO requires checksum offloading.
			 * Some versions of FreeBSD fail to
			 * set CSUM_TCP in the CSUM_TSO case,
			 * so we have to test for CSUM_TSO
			 * explicitly.
			 */
			if (m->m_pkthdr.csum_flags
			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
				tx->flags |= (NETTXF_csum_blank
				    | NETTXF_data_validated);
			}
#if __FreeBSD_version >= 700000
			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
				struct netif_extra_info *gso =
				    (struct netif_extra_info *)
				    RING_GET_REQUEST(&sc->tx,
				    ++sc->tx.req_prod_pvt);

				tx->flags |= NETTXF_extra_info;

				gso->u.gso.size = m->m_pkthdr.tso_segsz;
				gso->u.gso.type =
				    XEN_NETIF_GSO_TYPE_TCPV4;
				gso->u.gso.pad = 0;
				gso->u.gso.features = 0;

				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
				gso->flags = 0;
			}
#endif
		} else {
			tx->size = m->m_len;
		}
		if (m->m_next)
			tx->flags |= NETTXF_more_data;

		sc->tx.req_prod_pvt++;
	}
	BPF_MTAP(ifp, m_head);

	sc->stats.tx_bytes += m_head->m_pkthdr.len;
	sc->stats.tx_packets++;

	return (0);
}

static void
xn_start_locked(struct ifnet *ifp)
{
	struct netfront_info *sc;
	struct mbuf *m_head;
	int notify;

	sc = ifp->if_softc;

	if (!netfront_carrier_ok(sc))
		return;

	/*
	 * While we have enough transmit slots available for at least one
	 * maximum-sized packet, pull mbufs off the queue and put them on
	 * the transmit ring.
	 */
1724 */ 1725 while (xn_tx_slot_available(sc)) { 1726 IF_DEQUEUE(&ifp->if_snd, m_head); 1727 if (m_head == NULL) 1728 break; 1729 1730 if (xn_assemble_tx_request(sc, m_head) != 0) 1731 break; 1732 } 1733 1734 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify); 1735 if (notify) 1736 xen_intr_signal(sc->xen_intr_handle); 1737 1738 if (RING_FULL(&sc->tx)) { 1739 sc->tx_full = 1; 1740#if 0 1741 netif_stop_queue(dev); 1742#endif 1743 } 1744} 1745 1746static void 1747xn_start(struct ifnet *ifp) 1748{ 1749 struct netfront_info *sc; 1750 sc = ifp->if_softc; 1751 XN_TX_LOCK(sc); 1752 xn_start_locked(ifp); 1753 XN_TX_UNLOCK(sc); 1754} 1755 1756/* equivalent of network_open() in Linux */ 1757static void 1758xn_ifinit_locked(struct netfront_info *sc) 1759{ 1760 struct ifnet *ifp; 1761 1762 XN_LOCK_ASSERT(sc); 1763 1764 ifp = sc->xn_ifp; 1765 1766 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1767 return; 1768 1769 xn_stop(sc); 1770 1771 network_alloc_rx_buffers(sc); 1772 sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1; 1773 1774 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1775 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1776 if_link_state_change(ifp, LINK_STATE_UP); 1777 1778 callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); 1779} 1780 1781static void 1782xn_ifinit(void *xsc) 1783{ 1784 struct netfront_info *sc = xsc; 1785 1786 XN_LOCK(sc); 1787 xn_ifinit_locked(sc); 1788 XN_UNLOCK(sc); 1789} 1790 1791static int 1792xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1793{ 1794 struct netfront_info *sc = ifp->if_softc; 1795 struct ifreq *ifr = (struct ifreq *) data; 1796#ifdef INET 1797 struct ifaddr *ifa = (struct ifaddr *)data; 1798#endif 1799 1800 int mask, error = 0; 1801 switch(cmd) { 1802 case SIOCSIFADDR: 1803 case SIOCGIFADDR: 1804#ifdef INET 1805 XN_LOCK(sc); 1806 if (ifa->ifa_addr->sa_family == AF_INET) { 1807 ifp->if_flags |= IFF_UP; 1808 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1809 xn_ifinit_locked(sc); 1810 arp_ifinit(ifp, ifa); 1811 XN_UNLOCK(sc); 1812 } else { 1813 XN_UNLOCK(sc); 1814#endif 1815 error = ether_ioctl(ifp, cmd, data); 1816#ifdef INET 1817 } 1818#endif 1819 break; 1820 case SIOCSIFMTU: 1821 /* XXX can we alter the MTU on a VN ?*/ 1822#ifdef notyet 1823 if (ifr->ifr_mtu > XN_JUMBO_MTU) 1824 error = EINVAL; 1825 else 1826#endif 1827 { 1828 ifp->if_mtu = ifr->ifr_mtu; 1829 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1830 xn_ifinit(sc); 1831 } 1832 break; 1833 case SIOCSIFFLAGS: 1834 XN_LOCK(sc); 1835 if (ifp->if_flags & IFF_UP) { 1836 /* 1837 * If only the state of the PROMISC flag changed, 1838 * then just use the 'set promisc mode' command 1839 * instead of reinitializing the entire NIC. Doing 1840 * a full re-init means reloading the firmware and 1841 * waiting for it to start up, which may take a 1842 * second or two. 
#ifdef notyet
			/* No promiscuous mode with Xen */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xn_if_flags & IFF_PROMISC)) {
				XN_SETBIT(sc, XN_RX_MODE,
				    XN_RXMODE_RX_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->xn_if_flags & IFF_PROMISC) {
				XN_CLRBIT(sc, XN_RX_MODE,
				    XN_RXMODE_RX_PROMISC);
			} else
#endif
				xn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xn_stop(sc);
			}
		}
		sc->xn_if_flags = ifp->if_flags;
		XN_UNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
				    | CSUM_IP | CSUM_TSO);
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
				    | CSUM_IP);
			}
		}
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			if (IFCAP_TSO4 & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			} else {
				IPRINTK("Xen requires tx checksum offload"
				    " be enabled to use TSO\n");
				error = EINVAL;
			}
		}
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
		}
#endif
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#ifdef notyet
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			XN_LOCK(sc);
			xn_setmulti(sc);
			XN_UNLOCK(sc);
			error = 0;
		}
#endif
		/* FALLTHROUGH */
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}

static void
xn_stop(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	callout_stop(&sc->xn_stat_ch);

	xn_free_rx_ring(sc);
	xn_free_tx_ring(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
}

/* START of Xenolinux helper functions adapted to FreeBSD */
int
network_connect(struct netfront_info *np)
{
	int i, requeue_idx, error;
	grant_ref_t ref;
	netif_rx_request_t *req;
	u_int feature_rx_copy, feature_rx_flip;

	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
	if (error)
		feature_rx_copy = 0;
	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-flip", NULL, "%u", &feature_rx_flip);
	if (error)
		feature_rx_flip = 1;

	/*
	 * Copy packets on receive path if:
	 *  (a) This was requested by user, and the backend supports it; or
	 *  (b) Flipping was requested, but this is unsupported by the backend.
	 */
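	/*
	 * Editorial note (not in the original source): expanded, the test
	 * below computes
	 *
	 *	copying = (rx_copy requested && backend has feature-rx-copy)
	 *	       || (rx_flip requested && backend lacks feature-rx-flip)
	 *
	 * and with the FreeBSD defaults (MODPARM_rx_copy == 1,
	 * MODPARM_rx_flip == 0) this reduces to: copy whenever the backend
	 * advertises feature-rx-copy, otherwise flip.
	 */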
1967 */ 1968 np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || 1969 (MODPARM_rx_flip && !feature_rx_flip)); 1970 1971 /* Recovery procedure: */ 1972 error = talk_to_backend(np->xbdev, np); 1973 if (error) 1974 return (error); 1975 1976 /* Step 1: Reinitialise variables. */ 1977 xn_query_features(np); 1978 xn_configure_features(np); 1979 netif_release_tx_bufs(np); 1980 1981 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ 1982 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 1983 struct mbuf *m; 1984 u_long pfn; 1985 1986 if (np->rx_mbufs[i] == NULL) 1987 continue; 1988 1989 m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i); 1990 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); 1991 1992 req = RING_GET_REQUEST(&np->rx, requeue_idx); 1993 pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT; 1994 1995 if (!np->copying_receiver) { 1996 gnttab_grant_foreign_transfer_ref(ref, 1997 xenbus_get_otherend_id(np->xbdev), 1998 pfn); 1999 } else { 2000 gnttab_grant_foreign_access_ref(ref, 2001 xenbus_get_otherend_id(np->xbdev), 2002 PFNTOMFN(pfn), 0); 2003 } 2004 req->gref = ref; 2005 req->id = requeue_idx; 2006 2007 requeue_idx++; 2008 } 2009 2010 np->rx.req_prod_pvt = requeue_idx; 2011 2012 /* Step 3: All public and private state should now be sane. Get 2013 * ready to start sending and receiving packets and give the driver 2014 * domain a kick because we've probably just requeued some 2015 * packets. 2016 */ 2017 netfront_carrier_on(np); 2018 xen_intr_signal(np->xen_intr_handle); 2019 XN_TX_LOCK(np); 2020 xn_txeof(np); 2021 XN_TX_UNLOCK(np); 2022 network_alloc_rx_buffers(np); 2023 2024 return (0); 2025} 2026 2027static void 2028xn_query_features(struct netfront_info *np) 2029{ 2030 int val; 2031 2032 device_printf(np->xbdev, "backend features:"); 2033 2034 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 2035 "feature-sg", NULL, "%d", &val) < 0) 2036 val = 0; 2037 2038 np->maxfrags = 1; 2039 if (val) { 2040 np->maxfrags = MAX_TX_REQ_FRAGS; 2041 printf(" feature-sg"); 2042 } 2043 2044 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 2045 "feature-gso-tcpv4", NULL, "%d", &val) < 0) 2046 val = 0; 2047 2048 np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO); 2049 if (val) { 2050 np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO; 2051 printf(" feature-gso-tcp4"); 2052 } 2053 2054 printf("\n"); 2055} 2056 2057static int 2058xn_configure_features(struct netfront_info *np) 2059{ 2060 int err, cap_enabled; 2061 2062 err = 0; 2063 2064 if (np->xn_resume && 2065 ((np->xn_ifp->if_capenable & np->xn_ifp->if_capabilities) 2066 == np->xn_ifp->if_capenable)) { 2067 /* Current options are available, no need to do anything. */ 2068 return (0); 2069 } 2070 2071 /* Try to preserve as many options as possible. 
	if (np->xn_resume)
		cap_enabled = np->xn_ifp->if_capenable;
	else
		cap_enabled = UINT_MAX;

#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
	if ((np->xn_ifp->if_capenable & IFCAP_LRO) == (cap_enabled & IFCAP_LRO))
		tcp_lro_free(&np->xn_lro);
#endif
	np->xn_ifp->if_capenable =
	    np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4) & cap_enabled;
	np->xn_ifp->if_hwassist &= ~CSUM_TSO;
#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
	if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) ==
	    (cap_enabled & IFCAP_LRO)) {
		err = tcp_lro_init(&np->xn_lro);
		if (err) {
			device_printf(np->xbdev, "LRO initialization failed\n");
		} else {
			np->xn_lro.ifp = np->xn_ifp;
			np->xn_ifp->if_capenable |= IFCAP_LRO;
		}
	}
	if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) ==
	    (cap_enabled & IFCAP_TSO4)) {
		np->xn_ifp->if_capenable |= IFCAP_TSO4;
		np->xn_ifp->if_hwassist |= CSUM_TSO;
	}
#endif
	return (err);
}

/**
 * Create a network device.
 * @param dev  Newbus device representing this virtual NIC.
 */
int
create_netdev(device_t dev)
{
	int i;
	struct netfront_info *np;
	int err;
	struct ifnet *ifp;

	np = device_get_softc(dev);

	np->xbdev = dev;

	XN_LOCK_INIT(np, xennetif);

	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);

	np->rx_target = RX_MIN_TARGET;
	np->rx_min_target = RX_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	/* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
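	/*
	 * Editorial sketch (not in the original source): each free slot
	 * stores the index of the next free slot in its pointer field, so
	 * the tx_mbufs array doubles as a singly linked freelist:
	 *
	 *	tx_mbufs[0] -> 1 -> 2 -> ... -> NET_TX_RING_SIZE -> 0 (end)
	 *
	 * with slot 0 acting as the head, so grabbing and returning an id
	 * are both O(1) pointer swaps.  Any value <= NET_TX_RING_SIZE found
	 * in the array is therefore a freelist link, and anything larger is
	 * a real mbuf pointer; the rest of the driver relies on that
	 * distinction when it walks the table.
	 */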
	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
		np->tx_mbufs[i] = (void *) ((u_long) i+1);
		np->grant_tx_ref[i] = GRANT_REF_INVALID;
	}
	np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;

	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
		np->rx_mbufs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_REF_INVALID;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
	    &np->gref_tx_head) != 0) {
		IPRINTK("#### netfront can't alloc tx grant refs\n");
		err = ENOMEM;
		goto out;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
	    &np->gref_rx_head) != 0) {
		WPRINTK("#### netfront can't alloc rx grant refs\n");
		err = ENOMEM;
		goto exit;
	}

	err = xen_net_read_mac(dev, np->mac);
	if (err) {
		gnttab_free_grant_references(np->gref_rx_head);
		goto exit;
	}

	/* Set up ifnet structure */
	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = np;
	if_initname(ifp, "xn", device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xn_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = xn_start;
#ifdef notyet
	ifp->if_watchdog = xn_watchdog;
#endif
	ifp->if_init = xn_ifinit;
	ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;

	ifp->if_hwassist = XN_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;

	ether_ifattach(ifp, np->mac);
	callout_init(&np->xn_stat_ch, 1);
	netfront_carrier_off(np);

	return (0);

exit:
	gnttab_free_grant_references(np->gref_tx_head);
out:
	return (err);
}
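/*
 * Editorial note (not in the original source): the TSO limits set in
 * create_netdev() are sized to the backend's Linux-derived slot budget.
 * A 64KB payload that is not page aligned can span 65536 / PAGE_SIZE + 1
 * pages, and MAX_TX_REQ_FRAGS allows one slot beyond that, mirroring the
 * Linux MAX_SKB_FRAGS constant, so a maximal TSO request from the stack
 * still fits in a single ring submission.
 */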
/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
#if 0
static void
netfront_closing(device_t dev)
{
#if 0
	struct netfront_info *info = dev->dev_driver_data;

	DPRINTK("netfront_closing: %s removed\n", dev->nodename);

	close_netdev(info);
#endif
	xenbus_switch_state(dev, XenbusStateClosed);
}
#endif

static int
netfront_detach(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	DPRINTK("%s\n", xenbus_get_node(dev));

	netif_free(info);

	return (0);
}

static void
netif_free(struct netfront_info *info)
{
	XN_LOCK(info);
	xn_stop(info);
	XN_UNLOCK(info);
	callout_drain(&info->xn_stat_ch);
	netif_disconnect_backend(info);
	if (info->xn_ifp != NULL) {
		ether_ifdetach(info->xn_ifp);
		if_free(info->xn_ifp);
		info->xn_ifp = NULL;
	}
	ifmedia_removeall(&info->sc_media);
	netif_release_tx_bufs(info);
	if (info->copying_receiver)
		netif_release_rx_bufs_copy(info);

	gnttab_free_grant_references(info->gref_tx_head);
	gnttab_free_grant_references(info->gref_rx_head);
}

static void
netif_disconnect_backend(struct netfront_info *info)
{
	XN_RX_LOCK(info);
	XN_TX_LOCK(info);
	netfront_carrier_off(info);
	XN_TX_UNLOCK(info);
	XN_RX_UNLOCK(info);

	free_ring(&info->tx_ring_ref, &info->tx.sring);
	free_ring(&info->rx_ring_ref, &info->rx.sring);

	xen_intr_unbind(&info->xen_intr_handle);
}

static void
free_ring(int *ref, void *ring_ptr_ref)
{
	void **ring_ptr_ptr = ring_ptr_ref;

	if (*ref != GRANT_REF_INVALID) {
		/* This API frees the associated storage. */
		gnttab_end_foreign_access(*ref, *ring_ptr_ptr);
		*ref = GRANT_REF_INVALID;
	}
	*ring_ptr_ptr = NULL;
}

static int
xn_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}

static void
xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netfront_probe),
	DEVMETHOD(device_attach,	netfront_attach),
	DEVMETHOD(device_detach,	netfront_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	netfront_suspend),
	DEVMETHOD(device_resume,	netfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),

	DEVMETHOD_END
};

static driver_t netfront_driver = {
	"xn",
	netfront_methods,
	sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(netfront, xenbusb_front, netfront_driver, netfront_devclass,
    NULL, NULL);
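/*
 * Editorial note (not in the original source): DRIVER_MODULE() hangs this
 * driver off the xenbusb_front bus, so each virtual NIC the backend offers
 * probes, attaches, and shows up under the "xn" name given to if_initname()
 * in create_netdev(), e.g. from a guest:
 *
 *	# ifconfig xn0 inet 192.0.2.10/24 up
 *
 * The final two DRIVER_MODULE() arguments (module event handler and its
 * argument) are unused here.
 */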