/*-
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 *          Alan Somers         (Spectra Logic Corporation)
 *          John Suykerbuyk     (Spectra Logic Corporation)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/xen/netback/netback.c 319222 2017-05-30 16:15:52Z asomers $");

/**
 * \file netback.c
 *
 * \brief Device driver supporting the vending of network access
 *        from this FreeBSD domain to other domains.
 */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_global.h"

#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/bus.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#endif
#include <netinet/ip_icmp.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/_inttypes.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include <machine/xen/xenvar.h>

/*--------------------------- Compile-time Tunables --------------------------*/

/*---------------------------------- Macros ----------------------------------*/
/**
 * Custom malloc type for all driver allocations.
 */
static MALLOC_DEFINE(M_XENNETBACK, "xnb", "Xen Net Back Driver Data");
#define XNB_SG        1    /* netback driver supports feature-sg */
#define XNB_GSO_TCPV4 1    /* netback driver supports feature-gso-tcpv4 */
#define XNB_RX_COPY   1    /* netback driver supports feature-rx-copy */
#define XNB_RX_FLIP   0    /* netback driver does not support feature-rx-flip */

#undef XNB_DEBUG
#define XNB_DEBUG /* hardcode on during development */

#ifdef XNB_DEBUG
#define DPRINTF(fmt, args...) \
    printf("xnb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...) do {} while (0)
#endif

/* Default length for stack-allocated grant tables */
#define GNTTAB_LEN    (64)

/* Features supported by all backends.  TSO and LRO can be negotiated */
#define XNB_CSUM_FEATURES    (CSUM_TCP | CSUM_UDP)

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)

/**
 * Two argument version of the standard macro.  Second argument is a
 * tentative value of req_cons.
 */
#define RING_HAS_UNCONSUMED_REQUESTS_2(_r, cons) ({        \
    unsigned int req = (_r)->sring->req_prod - cons;       \
    unsigned int rsp = RING_SIZE(_r) -                     \
        (cons - (_r)->rsp_prod_pvt);                       \
    req < rsp ? req : rsp;                                 \
})
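
/*
 * Worked example for the macro above (illustrative values, not from a live
 * ring): with RING_SIZE(_r) == 256, sring->req_prod == 10,
 * rsp_prod_pvt == 2, and a tentative cons == 4, "req" is 10 - 4 = 6
 * outstanding requests while "rsp" is 256 - (4 - 2) = 254 free response
 * slots, so the macro yields 6.  Taking the minimum keeps a caller that is
 * speculatively advancing req_cons from consuming requests for which it
 * could not generate responses.
 */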

#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)
#define virt_to_offset(x) ((x) & (PAGE_SIZE - 1))

/**
 * Predefined array type of grant table copy descriptors.  Used to pass around
 * statically allocated memory structures.
 */
typedef struct gnttab_copy gnttab_copy_table[GNTTAB_LEN];

/*--------------------------- Forward Declarations ---------------------------*/
struct xnb_softc;
struct xnb_pkt;

static void    xnb_attach_failed(struct xnb_softc *xnb,
                                 int err, const char *fmt, ...)
                                 __printflike(3,4);
static int     xnb_shutdown(struct xnb_softc *xnb);
static int     create_netdev(device_t dev);
static int     xnb_detach(device_t dev);
static int     xen_net_read_mac(device_t dev, uint8_t mac[]);
static int     xnb_ifmedia_upd(struct ifnet *ifp);
static void    xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static void    xnb_intr(void *arg);
static int     xnb_send(netif_rx_back_ring_t *rxb, domid_t otherend,
                        const struct mbuf *mbufc, gnttab_copy_table gnttab);
static int     xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend,
                        struct mbuf **mbufc, struct ifnet *ifnet,
                        gnttab_copy_table gnttab);
static int     xnb_ring2pkt(struct xnb_pkt *pkt,
                            const netif_tx_back_ring_t *tx_ring,
                            RING_IDX start);
static void    xnb_txpkt2rsp(const struct xnb_pkt *pkt,
                             netif_tx_back_ring_t *ring, int error);
static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp);
static int     xnb_txpkt2gnttab(const struct xnb_pkt *pkt,
                                const struct mbuf *mbufc,
                                gnttab_copy_table gnttab,
                                const netif_tx_back_ring_t *txb,
                                domid_t otherend_id);
static void    xnb_update_mbufc(struct mbuf *mbufc,
                                const gnttab_copy_table gnttab, int n_entries);
static int     xnb_mbufc2pkt(const struct mbuf *mbufc,
                             struct xnb_pkt *pkt,
                             RING_IDX start, int space);
static int     xnb_rxpkt2gnttab(const struct xnb_pkt *pkt,
                                const struct mbuf *mbufc,
                                gnttab_copy_table gnttab,
                                const netif_rx_back_ring_t *rxb,
                                domid_t otherend_id);
static int     xnb_rxpkt2rsp(const struct xnb_pkt *pkt,
                             const gnttab_copy_table gnttab, int n_entries,
                             netif_rx_back_ring_t *ring);
static void    xnb_stop(struct xnb_softc *);
static int     xnb_ioctl(struct ifnet *, u_long, caddr_t);
static void    xnb_start_locked(struct ifnet *);
static void    xnb_start(struct ifnet *);
static void    xnb_ifinit_locked(struct xnb_softc *);
static void    xnb_ifinit(void *);
#ifdef XNB_DEBUG
static int     xnb_unit_test_main(SYSCTL_HANDLER_ARGS);
static int     xnb_dump_rings(SYSCTL_HANDLER_ARGS);
#endif
#if defined(INET) || defined(INET6)
static void    xnb_add_mbuf_cksum(struct mbuf *mbufc);
#endif
/*------------------------------ Data Structures -----------------------------*/


/**
 * Representation of a xennet packet.  Simplified version of a packet as
 * stored in the Xen tx ring.  Applicable to both RX and TX packets.
 */
struct xnb_pkt {
    /**
     * Array index of the first data-bearing (i.e., not extra info) entry
     * for this packet.
     */
    RING_IDX car;

    /**
     * Array index of the second data-bearing entry for this packet.
     * Invalid if the packet has only one data-bearing entry.  If the
     * packet has more than two data-bearing entries, then the second
     * through the last will be sequential modulo the ring size.
     */
    RING_IDX cdr;
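
    /*
     * Layout sketch (ring indices are illustrative): for a packet made of
     * three data requests plus extra info arriving at ring slots 5..8,
     * car == 5 (the first data request), slot 6 holds the extra info, and
     * cdr == 7, with the final data request in slot 8.
     */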

    /**
     * Optional extra info.  Only valid if flags contains
     * NETTXF_extra_info.  Note that extra.type will always be
     * XEN_NETIF_EXTRA_TYPE_GSO.  Currently, no known netfront or netback
     * driver will ever set XEN_NETIF_EXTRA_TYPE_MCAST_*.
     */
    netif_extra_info_t extra;

    /** Size of entire packet in bytes. */
    uint16_t size;

    /** The size of the first entry's data in bytes */
    uint16_t car_size;

    /**
     * Either NETTXF_ or NETRXF_ flags.  Note that the flag values are
     * not the same for TX and RX packets.
     */
    uint16_t flags;

    /**
     * The number of valid data-bearing entries (either netif_tx_request's
     * or netif_rx_response's) in the packet.  If this is 0, it means the
     * entire packet is invalid.
     */
    uint16_t list_len;

    /** There was an error processing the packet */
    uint8_t error;
};

/** xnb_pkt method: initialize it */
static inline void
xnb_pkt_initialize(struct xnb_pkt *pxnb)
{
    bzero(pxnb, sizeof(*pxnb));
}

/** xnb_pkt method: mark the packet as valid */
static inline void
xnb_pkt_validate(struct xnb_pkt *pxnb)
{
    pxnb->error = 0;
}

/** xnb_pkt method: mark the packet as invalid */
static inline void
xnb_pkt_invalidate(struct xnb_pkt *pxnb)
{
    pxnb->error = 1;
}

/** xnb_pkt method: check whether the packet is valid */
static inline int
xnb_pkt_is_valid(const struct xnb_pkt *pxnb)
{
    return (!pxnb->error);
}

#ifdef XNB_DEBUG
/** xnb_pkt method: print the packet's contents in human-readable format */
static void __unused
xnb_dump_pkt(const struct xnb_pkt *pkt)
{
    if (pkt == NULL) {
        DPRINTF("Was passed a null pointer.\n");
        return;
    }
    DPRINTF("pkt address= %p\n", pkt);
    DPRINTF("pkt->size=%d\n", pkt->size);
    DPRINTF("pkt->car_size=%d\n", pkt->car_size);
    DPRINTF("pkt->flags=0x%04x\n", pkt->flags);
    DPRINTF("pkt->list_len=%d\n", pkt->list_len);
    /* DPRINTF("pkt->extra");  TODO */
    DPRINTF("pkt->car=%d\n", pkt->car);
    DPRINTF("pkt->cdr=%d\n", pkt->cdr);
    DPRINTF("pkt->error=%d\n", pkt->error);
}
#endif /* XNB_DEBUG */

static void
xnb_dump_txreq(RING_IDX idx, const struct netif_tx_request *txreq)
{
    if (txreq != NULL) {
        DPRINTF("netif_tx_request index =%u\n", idx);
        DPRINTF("netif_tx_request.gref  =%u\n", txreq->gref);
        DPRINTF("netif_tx_request.offset=%hu\n", txreq->offset);
        DPRINTF("netif_tx_request.flags =%hu\n", txreq->flags);
        DPRINTF("netif_tx_request.id    =%hu\n", txreq->id);
        DPRINTF("netif_tx_request.size  =%hu\n", txreq->size);
    }
}


/**
 * \brief Configuration data for a shared memory request ring
 *        used to communicate with the front-end client of this
 *        driver.
 */
struct xnb_ring_config {
    /**
     * Runtime structures for ring access.  Unfortunately, TX and RX rings
     * use different data structures, and that cannot be changed since it
     * is part of the interdomain protocol.
     */
    union {
        netif_rx_back_ring_t rx_ring;
        netif_tx_back_ring_t tx_ring;
    } back_ring;

    /**
     * The device bus address returned by the hypervisor when
     * mapping the ring and required to unmap it when a connection
     * is torn down.
     */
    uint64_t bus_addr;

    /** The pseudo-physical address where ring memory is mapped. */
    uint64_t gnt_addr;

    /** KVA address where ring memory is mapped. */
    vm_offset_t va;

    /**
     * Grant table handles, one per-ring page, returned by the
     * hypervisor upon mapping of the ring and required to
     * unmap it when a connection is torn down.
     */
    grant_handle_t handle;

    /** The number of ring pages mapped for the current connection. */
    unsigned ring_pages;

    /**
     * The grant references, one per-ring page, supplied by the
     * front-end, allowing us to reference the ring pages in the
     * front-end's domain and to map these pages into our own domain.
     */
    grant_ref_t ring_ref;
};
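
/*
 * Lifecycle note for the structure above: xnb_connect_ring() fills in va,
 * gnt_addr, handle, and bus_addr when it maps the page named by ring_ref;
 * xnb_disconnect() later feeds gnt_addr, bus_addr, and handle back to
 * GNTTABOP_unmap_grant_ref and then bzero()s this structure, so a zeroed
 * config doubles as "not connected".
 */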

/**
 * Per-instance connection state flags.
 */
typedef enum
{
    /** Communication with the front-end has been established. */
    XNBF_RING_CONNECTED    = 0x01,

    /**
     * Front-end requests exist in the ring and are waiting for
     * xnb_xen_req objects to free up.
     */
    XNBF_RESOURCE_SHORTAGE = 0x02,

    /** Connection teardown has started. */
    XNBF_SHUTDOWN          = 0x04,

    /** A thread is already performing shutdown processing. */
    XNBF_IN_SHUTDOWN       = 0x08
} xnb_flag_t;

/**
 * Types of rings.  Used for array indices and to identify a ring's control
 * data structure type.
 */
typedef enum {
    XNB_RING_TYPE_TX = 0,    /* ID of TX rings, used for array indices */
    XNB_RING_TYPE_RX = 1,    /* ID of RX rings, used for array indices */
    XNB_NUM_RING_TYPES
} xnb_ring_type_t;

/**
 * Per-instance configuration data.
 */
struct xnb_softc {
    /** NewBus device corresponding to this instance. */
    device_t dev;

    /* Media related fields */

    /** Generic network media state */
    struct ifmedia sc_media;

    /** Media carrier info */
    struct ifnet *xnb_ifp;

    /** Our own private carrier state */
    unsigned carrier;

    /** Device MAC Address */
    uint8_t mac[ETHER_ADDR_LEN];

    /* Xen related fields */

    /**
     * \brief The netif protocol abi in effect.
     *
     * There are situations where the back and front ends can
     * have a different, native abi (e.g. intel x86_64 and
     * 32bit x86 domains on the same machine).  The back-end
     * always accommodates the front-end's native abi.  That
     * value is pulled from the XenStore and recorded here.
     */
    int abi;

    /**
     * Name of the bridge to which this VIF is connected, if any.
     * This field is dynamically allocated by xenbus and must be free()ed
     * when no longer needed.
     */
    char *bridge;

    /** The interrupt-driven event channel used to signal ring events. */
    evtchn_port_t evtchn;

    /** Xen device handle. */
    long handle;

    /** Handle to the communication ring event channel. */
    xen_intr_handle_t xen_intr_handle;

    /**
     * \brief Cached value of the front-end's domain id.
     *
     * This value is used once for each mapped page in
     * a transaction.  We cache it to avoid incurring the
     * cost of an ivar access every time this is needed.
     */
    domid_t otherend_id;

    /**
     * Undocumented frontend feature.  Has something to do with
     * scatter/gather IO.
     */
    uint8_t can_sg;
    /** Undocumented frontend feature */
    uint8_t gso;
    /** Undocumented frontend feature */
    uint8_t gso_prefix;
    /** Can checksum TCP/UDP over IPv4 */
    uint8_t ip_csum;

    /* Implementation related fields */
    /**
     * Preallocated grant table copy descriptor for RX operations.
     * Access must be protected by rx_lock.
     */
    gnttab_copy_table rx_gnttab;

    /**
     * Preallocated grant table copy descriptor for TX operations.
     * Access must be protected by tx_lock.
     */
    gnttab_copy_table tx_gnttab;

#ifdef XENHVM
    /**
     * Resource representing allocated physical address space
     * associated with our per-instance kva region.
     */
    struct resource *pseudo_phys_res;

    /** Resource id for allocated physical address space. */
    int pseudo_phys_res_id;
#endif

    /** Ring mapping and interrupt configuration data. */
    struct xnb_ring_config ring_configs[XNB_NUM_RING_TYPES];

    /**
     * Global pool of kva used for mapping remote domain ring
     * and I/O transaction data.
     */
    vm_offset_t kva;

    /** Pseudo-physical address corresponding to kva. */
    uint64_t gnt_base_addr;

    /** Various configuration and state bit flags. */
    xnb_flag_t flags;

    /** Mutex protecting per-instance data in the receive path. */
    struct mtx rx_lock;

    /** Mutex protecting per-instance data in the softc structure. */
    struct mtx sc_lock;

    /** Mutex protecting per-instance data in the transmit path. */
    struct mtx tx_lock;

    /** The size of the global kva pool. */
    int kva_size;
};

/*---------------------------- Debugging functions ---------------------------*/
#ifdef XNB_DEBUG
static void __unused
xnb_dump_gnttab_copy(const struct gnttab_copy *entry)
{
    if (entry == NULL) {
        printf("NULL grant table pointer\n");
        return;
    }

    if (entry->flags & GNTCOPY_dest_gref)
        printf("gnttab dest ref=\t%u\n", entry->dest.u.ref);
    else
        printf("gnttab dest gmfn=\t%lu\n", entry->dest.u.gmfn);
    printf("gnttab dest offset=\t%hu\n", entry->dest.offset);
    printf("gnttab dest domid=\t%hu\n", entry->dest.domid);
    if (entry->flags & GNTCOPY_source_gref)
        printf("gnttab source ref=\t%u\n", entry->source.u.ref);
    else
        printf("gnttab source gmfn=\t%lu\n", entry->source.u.gmfn);
    printf("gnttab source offset=\t%hu\n", entry->source.offset);
    printf("gnttab source domid=\t%hu\n", entry->source.domid);
    printf("gnttab len=\t%hu\n", entry->len);
    printf("gnttab flags=\t%hu\n", entry->flags);
    printf("gnttab status=\t%hd\n", entry->status);
}

static int
xnb_dump_rings(SYSCTL_HANDLER_ARGS)
{
    static char results[720];
    struct xnb_softc const *xnb = (struct xnb_softc *)arg1;
    netif_rx_back_ring_t const *rxb =
        &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;
    netif_tx_back_ring_t const *txb =
        &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;

    /* empty the result strings */
    results[0] = 0;

    if (!txb || !txb->sring || !rxb || !rxb->sring)
        return (SYSCTL_OUT(req, results, strnlen(results, 720)));

    snprintf(results, 720,
        "\n\t%35s %18s\n"      /* TX, RX */
        "\t%16s %18d %18d\n"   /* req_cons */
        "\t%16s %18d %18d\n"   /* nr_ents */
        "\t%16s %18d %18d\n"   /* rsp_prod_pvt */
        "\t%16s %18p %18p\n"   /* sring */
        "\t%16s %18d %18d\n"   /* req_prod */
        "\t%16s %18d %18d\n"   /* req_event */
        "\t%16s %18d %18d\n"   /* rsp_prod */
        "\t%16s %18d %18d\n",  /* rsp_event */
        "TX", "RX",
        "req_cons", txb->req_cons, rxb->req_cons,
        "nr_ents", txb->nr_ents, rxb->nr_ents,
        "rsp_prod_pvt", txb->rsp_prod_pvt, rxb->rsp_prod_pvt,
        "sring", txb->sring, rxb->sring,
        "sring->req_prod", txb->sring->req_prod, rxb->sring->req_prod,
        "sring->req_event", txb->sring->req_event, rxb->sring->req_event,
        "sring->rsp_prod", txb->sring->rsp_prod, rxb->sring->rsp_prod,
        "sring->rsp_event", txb->sring->rsp_event, rxb->sring->rsp_event);

    return (SYSCTL_OUT(req, results, strnlen(results, 720)));
}
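
/*
 * The handler above backs a read-only sysctl node registered in
 * xnb_setup_sysctl().  Assuming the instance attached as unit 0, something
 * like the following (illustrative) command would dump both rings from
 * userland:
 *
 *     sysctl dev.xnb.0.dump_rings
 */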

static void __unused
xnb_dump_mbuf(const struct mbuf *m)
{
    int len;
    uint8_t *d;
    if (m == NULL)
        return;

    printf("xnb_dump_mbuf:\n");
    if (m->m_flags & M_PKTHDR) {
        printf("    flowid=%10d, csum_flags=%#8x, csum_data=%#8x, "
               "tso_segsz=%5hd\n",
               m->m_pkthdr.flowid, (int)m->m_pkthdr.csum_flags,
               m->m_pkthdr.csum_data, m->m_pkthdr.tso_segsz);
        printf("    rcvif=%16p,  len=%19d\n",
               m->m_pkthdr.rcvif, m->m_pkthdr.len);
    }
    printf("    m_next=%16p, m_nextpk=%16p, m_data=%16p\n",
           m->m_next, m->m_nextpkt, m->m_data);
    printf("    m_len=%17d, m_flags=%#15x, m_type=%18u\n",
           m->m_len, m->m_flags, m->m_type);

    len = m->m_len;
    d = mtod(m, uint8_t *);
    while (len > 0) {
        int i;
        printf("                ");
        for (i = 0; (i < 16) && (len > 0); i++, len--) {
            printf("%02hhx ", *(d++));
        }
        printf("\n");
    }
}
#endif /* XNB_DEBUG */

/*------------------------ Inter-Domain Communication ------------------------*/
/**
 * Free dynamically allocated KVA or pseudo-physical address allocations.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static void
xnb_free_communication_mem(struct xnb_softc *xnb)
{
    if (xnb->kva != 0) {
#ifndef XENHVM
        kva_free(xnb->kva, xnb->kva_size);
#else
        if (xnb->pseudo_phys_res != NULL) {
            bus_release_resource(xnb->dev, SYS_RES_MEMORY,
                xnb->pseudo_phys_res_id,
                xnb->pseudo_phys_res);
            xnb->pseudo_phys_res = NULL;
        }
#endif /* XENHVM */
    }
    xnb->kva = 0;
    xnb->gnt_base_addr = 0;
}

/**
 * Cleanup all inter-domain communication mechanisms.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_disconnect(struct xnb_softc *xnb)
{
    struct gnttab_unmap_grant_ref gnts[XNB_NUM_RING_TYPES];
    int error;
    int i;

    xen_intr_unbind(&xnb->xen_intr_handle);

    /*
     * We may still have another thread currently processing requests.  We
     * must acquire the rx and tx locks to make sure those threads are done,
     * but we can release those locks as soon as we acquire them, because no
     * more interrupts will be arriving.
     */
    mtx_lock(&xnb->tx_lock);
    mtx_unlock(&xnb->tx_lock);
    mtx_lock(&xnb->rx_lock);
    mtx_unlock(&xnb->rx_lock);

    /* Free malloc'd softc member variables */
    if (xnb->bridge != NULL)
        free(xnb->bridge, M_XENSTORE);

    /* All request processing has stopped, so unmap the rings */
    for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
        gnts[i].host_addr = xnb->ring_configs[i].gnt_addr;
        gnts[i].dev_bus_addr = xnb->ring_configs[i].bus_addr;
        gnts[i].handle = xnb->ring_configs[i].handle;
    }
    error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, gnts,
                                      XNB_NUM_RING_TYPES);
    KASSERT(error == 0, ("Grant table unmap op failed (%d)", error));

    xnb_free_communication_mem(xnb);
    /*
     * Zero the ring config structs because the pointers, handles, and
     * grant refs contained therein are no longer valid.
     */
    bzero(&xnb->ring_configs[XNB_RING_TYPE_TX],
        sizeof(struct xnb_ring_config));
    bzero(&xnb->ring_configs[XNB_RING_TYPE_RX],
        sizeof(struct xnb_ring_config));

    xnb->flags &= ~XNBF_RING_CONNECTED;
    return (0);
}

/**
 * Map a single shared memory ring into domain local address space and
 * initialize its control structure.
 *
 * \param xnb        Per-instance xnb configuration structure.
 * \param ring_type  Array index of this ring in the xnb's array of rings.
 * \return  An errno.
 */
static int
xnb_connect_ring(struct xnb_softc *xnb, xnb_ring_type_t ring_type)
{
    struct gnttab_map_grant_ref gnt;
    struct xnb_ring_config *ring = &xnb->ring_configs[ring_type];
    int error;

    /* TX ring type = 0, RX = 1 */
    ring->va = xnb->kva + ring_type * PAGE_SIZE;
    ring->gnt_addr = xnb->gnt_base_addr + ring_type * PAGE_SIZE;

    gnt.host_addr = ring->gnt_addr;
    gnt.flags = GNTMAP_host_map;
    gnt.ref = ring->ring_ref;
    gnt.dom = xnb->otherend_id;

    error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &gnt, 1);
    if (error != 0)
        panic("netback: Ring page grant table op failed (%d)", error);

    if (gnt.status != 0) {
        ring->va = 0;
        error = EACCES;
        xenbus_dev_fatal(xnb->dev, error,
                         "Ring shared page mapping failed. "
                         "Status %d.", gnt.status);
    } else {
        ring->handle = gnt.handle;
        ring->bus_addr = gnt.dev_bus_addr;

        if (ring_type == XNB_RING_TYPE_TX) {
            BACK_RING_INIT(&ring->back_ring.tx_ring,
                (netif_tx_sring_t *)ring->va,
                ring->ring_pages * PAGE_SIZE);
        } else if (ring_type == XNB_RING_TYPE_RX) {
            BACK_RING_INIT(&ring->back_ring.rx_ring,
                (netif_rx_sring_t *)ring->va,
                ring->ring_pages * PAGE_SIZE);
        } else {
            xenbus_dev_fatal(xnb->dev, error,
                             "Unknown ring type %d", ring_type);
        }
    }

    return (error);
}

/**
 * Setup the shared memory rings and bind an interrupt to the event channel
 * used to notify us of ring changes.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_connect_comms(struct xnb_softc *xnb)
{
    int error;
    xnb_ring_type_t i;

    if ((xnb->flags & XNBF_RING_CONNECTED) != 0)
        return (0);

    /*
     * KVA for our rings is at the tail of the region of kva allocated
     * by xnb_alloc_communication_mem().
     */
    for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
        error = xnb_connect_ring(xnb, i);
        if (error != 0)
            return (error);
    }

    xnb->flags |= XNBF_RING_CONNECTED;

    error = xen_intr_bind_remote_port(xnb->dev,
                                      xnb->otherend_id,
                                      xnb->evtchn,
                                      /*filter*/NULL,
                                      xnb_intr, /*arg*/xnb,
                                      INTR_TYPE_BIO | INTR_MPSAFE,
                                      &xnb->xen_intr_handle);
    if (error != 0) {
        (void)xnb_disconnect(xnb);
        xenbus_dev_fatal(xnb->dev, error, "binding event channel");
        return (error);
    }

    DPRINTF("rings connected!\n");

    return (0);
}

/**
 * Size KVA and pseudo-physical address allocations based on negotiated
 * values for the size and number of I/O requests, and the size of our
 * communication ring.
 *
 * \param xnb  Per-instance xnb configuration structure.
 *
 * These address spaces are used to dynamically map pages in the
 * front-end's domain into our own.
 */
static int
xnb_alloc_communication_mem(struct xnb_softc *xnb)
{
    xnb_ring_type_t i;

    xnb->kva_size = 0;
    for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
        xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE;
    }
#ifndef XENHVM
    xnb->kva = kva_alloc(xnb->kva_size);
    if (xnb->kva == 0)
        return (ENOMEM);
    xnb->gnt_base_addr = xnb->kva;
#else /* defined XENHVM */
    /*
     * Reserve a range of pseudo physical memory that we can map
     * into kva.  These pages will only be backed by machine
     * pages ("real memory") during the lifetime of front-end requests
     * via grant table operations.  We will map the netif tx and rx rings
     * into this space.
     */
    xnb->pseudo_phys_res_id = 0;
    xnb->pseudo_phys_res = bus_alloc_resource(xnb->dev, SYS_RES_MEMORY,
                                              &xnb->pseudo_phys_res_id,
                                              0, ~0, xnb->kva_size,
                                              RF_ACTIVE);
    if (xnb->pseudo_phys_res == NULL) {
        xnb->kva = 0;
        return (ENOMEM);
    }
    xnb->kva = (vm_offset_t)rman_get_virtual(xnb->pseudo_phys_res);
    xnb->gnt_base_addr = rman_get_start(xnb->pseudo_phys_res);
#endif /* !defined XENHVM */
    return (0);
}

/**
 * Collect information from the XenStore related to our device and its
 * frontend.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_collect_xenstore_info(struct xnb_softc *xnb)
{
    /**
     * \todo Linux collects the following info.  We should collect most
     * of this, too:
     * "feature-rx-notify"
     */
    const char *otherend_path;
    const char *our_path;
    int err;
    unsigned int rx_copy, bridge_len;
    uint8_t no_csum_offload;

    otherend_path = xenbus_get_otherend_path(xnb->dev);
    our_path = xenbus_get_node(xnb->dev);

    /* Collect the critical communication parameters */
    err = xs_gather(XST_NIL, otherend_path,
        "tx-ring-ref", "%l" PRIu32,
            &xnb->ring_configs[XNB_RING_TYPE_TX].ring_ref,
        "rx-ring-ref", "%l" PRIu32,
            &xnb->ring_configs[XNB_RING_TYPE_RX].ring_ref,
        "event-channel", "%" PRIu32, &xnb->evtchn,
        NULL);
    if (err != 0) {
        xenbus_dev_fatal(xnb->dev, err,
                         "Unable to retrieve ring information from "
                         "frontend %s.  Unable to connect.",
                         otherend_path);
        return (err);
    }

    /* Collect the handle from xenstore */
    err = xs_scanf(XST_NIL, our_path, "handle", NULL, "%li", &xnb->handle);
    if (err != 0) {
        xenbus_dev_fatal(xnb->dev, err,
                         "Error reading handle from frontend %s.  "
                         "Unable to connect.", otherend_path);
    }

    /*
     * Collect the bridge name, if any.  We do not need bridge_len; we
     * just throw it away.
     */
    err = xs_read(XST_NIL, our_path, "bridge", &bridge_len,
                  (void **)&xnb->bridge);
    if (err != 0)
        xnb->bridge = NULL;

    /*
     * Does the frontend request that we use rx copy?  If not, return an
     * error because this driver only supports rx copy.
     */
    err = xs_scanf(XST_NIL, otherend_path, "request-rx-copy", NULL,
                   "%" PRIu32, &rx_copy);
    if (err == ENOENT) {
        err = 0;
        rx_copy = 0;
    }
    if (err < 0) {
        xenbus_dev_fatal(xnb->dev, err, "reading %s/request-rx-copy",
                         otherend_path);
        return (err);
    }
    /**
     * \todo: figure out the exact meaning of this feature, and when
     * the frontend will set it to true.  It should be set to true
     * at some point
     */
/*    if (!rx_copy)*/
/*        return EOPNOTSUPP;*/

    /** \todo Collect the rx notify feature */
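
    /*
     * For reference, the frontend values read above and below live under
     * the frontend's XenStore directory; a typical (illustrative) listing
     * might look like:
     *
     *     tx-ring-ref = "768"
     *     rx-ring-ref = "769"
     *     event-channel = "12"
     *     request-rx-copy = "1"
     *     feature-sg = "1"
     *     feature-gso-tcpv4 = "1"
     */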
    /* Collect the feature-sg. */
    if (xs_scanf(XST_NIL, otherend_path, "feature-sg", NULL,
                 "%hhu", &xnb->can_sg) < 0)
        xnb->can_sg = 0;

    /* Collect remaining frontend features */
    if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4", NULL,
                 "%hhu", &xnb->gso) < 0)
        xnb->gso = 0;

    if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4-prefix", NULL,
                 "%hhu", &xnb->gso_prefix) < 0)
        xnb->gso_prefix = 0;

    if (xs_scanf(XST_NIL, otherend_path, "feature-no-csum-offload", NULL,
                 "%hhu", &no_csum_offload) < 0)
        no_csum_offload = 0;
    xnb->ip_csum = (no_csum_offload == 0);

    return (0);
}

/**
 * Supply information about the physical device to the frontend
 * via XenBus.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_publish_backend_info(struct xnb_softc *xnb)
{
    struct xs_transaction xst;
    const char *our_path;
    int error;

    our_path = xenbus_get_node(xnb->dev);

    do {
        error = xs_transaction_start(&xst);
        if (error != 0) {
            xenbus_dev_fatal(xnb->dev, error,
                             "Error publishing backend info "
                             "(start transaction)");
            break;
        }

        error = xs_printf(xst, our_path, "feature-sg",
                          "%d", XNB_SG);
        if (error != 0)
            break;

        error = xs_printf(xst, our_path, "feature-gso-tcpv4",
                          "%d", XNB_GSO_TCPV4);
        if (error != 0)
            break;

        error = xs_printf(xst, our_path, "feature-rx-copy",
                          "%d", XNB_RX_COPY);
        if (error != 0)
            break;

        error = xs_printf(xst, our_path, "feature-rx-flip",
                          "%d", XNB_RX_FLIP);
        if (error != 0)
            break;

        error = xs_transaction_end(xst, 0);
        if (error != 0 && error != EAGAIN) {
            xenbus_dev_fatal(xnb->dev, error, "ending transaction");
            break;
        }

    } while (error == EAGAIN);

    return (error);
}

/**
 * Connect to our netfront peer now that it has completed publishing
 * its configuration into the XenStore.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static void
xnb_connect(struct xnb_softc *xnb)
{
    int error;

    if (xenbus_get_state(xnb->dev) == XenbusStateConnected)
        return;

    if (xnb_collect_xenstore_info(xnb) != 0)
        return;

    xnb->flags &= ~XNBF_SHUTDOWN;

    /* Read front end configuration. */

    /* Allocate resources whose size depends on front-end configuration. */
    error = xnb_alloc_communication_mem(xnb);
    if (error != 0) {
        xenbus_dev_fatal(xnb->dev, error,
                         "Unable to allocate communication memory");
        return;
    }

    /*
     * Connect communication channel.
     */
    error = xnb_connect_comms(xnb);
    if (error != 0) {
        /* Specific errors are reported by xnb_connect_comms(). */
        return;
    }
    xnb->carrier = 1;

    /* Ready for I/O. */
    xenbus_set_state(xnb->dev, XenbusStateConnected);
}
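
/*
 * Handshake summary: xnb_attach() leaves the device in XenbusStateInitWait;
 * once the frontend publishes its ring refs and event channel and moves to
 * Initialised or Connected, xnb_frontend_changed() calls xnb_connect()
 * above, which collects the XenStore parameters, allocates and maps the
 * rings, binds the event channel, and finally advertises
 * XenbusStateConnected.
 */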

/*-------------------------- Device Teardown Support -------------------------*/
/**
 * Perform device shutdown functions.
 *
 * \param xnb  Per-instance xnb configuration structure.
 *
 * Mark this instance as shutting down, wait for any active requests
 * to drain, disconnect from the front-end, and notify any waiters (e.g.
 * a thread invoking our detach method) that detach can now proceed.
 */
static int
xnb_shutdown(struct xnb_softc *xnb)
{
    /*
     * Due to the need to drop our mutex during some
     * xenbus operations, it is possible for two threads
     * to attempt to close out shutdown processing at
     * the same time.  Tell the caller that hits this
     * race to try back later.
     */
    if ((xnb->flags & XNBF_IN_SHUTDOWN) != 0)
        return (EAGAIN);

    xnb->flags |= XNBF_SHUTDOWN;

    xnb->flags |= XNBF_IN_SHUTDOWN;

    mtx_unlock(&xnb->sc_lock);
    /* Free the network interface */
    xnb->carrier = 0;
    if (xnb->xnb_ifp != NULL) {
        ether_ifdetach(xnb->xnb_ifp);
        if_free(xnb->xnb_ifp);
        xnb->xnb_ifp = NULL;
    }
    mtx_lock(&xnb->sc_lock);

    xnb_disconnect(xnb);

    mtx_unlock(&xnb->sc_lock);
    if (xenbus_get_state(xnb->dev) < XenbusStateClosing)
        xenbus_set_state(xnb->dev, XenbusStateClosing);
    mtx_lock(&xnb->sc_lock);

    xnb->flags &= ~XNBF_IN_SHUTDOWN;


    /* Indicate to xnb_detach() that it is safe to proceed. */
    wakeup(xnb);

    return (0);
}

/**
 * Report an attach time error to the console and Xen, and cleanup
 * this instance by forcing immediate detach processing.
 *
 * \param xnb  Per-instance xnb configuration structure.
 * \param err  Errno describing the error.
 * \param fmt  Printf style format and arguments
 */
static void
xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...)
{
    va_list ap;
    va_list ap_hotplug;

    va_start(ap, fmt);
    va_copy(ap_hotplug, ap);
    xs_vprintf(XST_NIL, xenbus_get_node(xnb->dev),
               "hotplug-error", fmt, ap_hotplug);
    va_end(ap_hotplug);
    (void)xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
                    "hotplug-status", "error");

    xenbus_dev_vfatal(xnb->dev, err, fmt, ap);
    va_end(ap);

    (void)xs_printf(XST_NIL, xenbus_get_node(xnb->dev), "online", "0");
    xnb_detach(xnb->dev);
}

/*---------------------------- NewBus Entrypoints ----------------------------*/
/**
 * Inspect a XenBus device and claim it if it is of the appropriate type.
 *
 * \param dev  NewBus device object representing a candidate XenBus device.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_probe(device_t dev)
{
    if (!strcmp(xenbus_get_type(dev), "vif")) {
        DPRINTF("Claiming device %d, %s\n", device_get_unit(dev),
                devclass_get_name(device_get_devclass(dev)));
        device_set_desc(dev, "Backend Virtual Network Device");
        device_quiet(dev);
        return (0);
    }
    return (ENXIO);
}

/**
 * Setup sysctl variables to control various Network Back parameters.
 *
 * \param xnb  Xen Net Back softc.
 */
static void
xnb_setup_sysctl(struct xnb_softc *xnb)
{
    struct sysctl_ctx_list *sysctl_ctx = NULL;
    struct sysctl_oid *sysctl_tree = NULL;

    sysctl_ctx = device_get_sysctl_ctx(xnb->dev);
    if (sysctl_ctx == NULL)
        return;

    sysctl_tree = device_get_sysctl_tree(xnb->dev);
    if (sysctl_tree == NULL)
        return;

#ifdef XNB_DEBUG
    SYSCTL_ADD_PROC(sysctl_ctx,
                    SYSCTL_CHILDREN(sysctl_tree),
                    OID_AUTO,
                    "unit_test_results",
                    CTLTYPE_STRING | CTLFLAG_RD,
                    xnb,
                    0,
                    xnb_unit_test_main,
                    "A",
                    "Results of builtin unit tests");

    SYSCTL_ADD_PROC(sysctl_ctx,
                    SYSCTL_CHILDREN(sysctl_tree),
                    OID_AUTO,
                    "dump_rings",
                    CTLTYPE_STRING | CTLFLAG_RD,
                    xnb,
                    0,
                    xnb_dump_rings,
                    "A",
                    "Xennet Back Rings");
#endif /* XNB_DEBUG */
}

/**
 * Create a network device.
 *
 * \param dev  NewBus device representing this virtual interface.
 */
static int
create_netdev(device_t dev)
{
    struct ifnet *ifp;
    struct xnb_softc *xnb;
    int err = 0;

    xnb = device_get_softc(dev);
    mtx_init(&xnb->sc_lock, "xnb_softc", "xen netback softc lock", MTX_DEF);
    mtx_init(&xnb->tx_lock, "xnb_tx", "xen netback tx lock", MTX_DEF);
    mtx_init(&xnb->rx_lock, "xnb_rx", "xen netback rx lock", MTX_DEF);

    xnb->dev = dev;

    ifmedia_init(&xnb->sc_media, 0, xnb_ifmedia_upd, xnb_ifmedia_sts);
    ifmedia_add(&xnb->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
    ifmedia_set(&xnb->sc_media, IFM_ETHER|IFM_MANUAL);

    err = xen_net_read_mac(dev, xnb->mac);
    if (err == 0) {
        /* Set up ifnet structure */
        ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER);
        ifp->if_softc = xnb;
        if_initname(ifp, "xnb", device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = xnb_ioctl;
        ifp->if_output = ether_output;
        ifp->if_start = xnb_start;
#ifdef notyet
        ifp->if_watchdog = xnb_watchdog;
#endif
        ifp->if_init = xnb_ifinit;
        ifp->if_mtu = ETHERMTU;
        /* Never queue more packets than the RX ring can absorb. */
        ifp->if_snd.ifq_maxlen = NET_RX_RING_SIZE - 1;

        ifp->if_hwassist = XNB_CSUM_FEATURES;
        ifp->if_capabilities = IFCAP_HWCSUM;
        ifp->if_capenable = IFCAP_HWCSUM;

        ether_ifattach(ifp, xnb->mac);
        xnb->carrier = 0;
    }

    return (err);
}

/**
 * Attach to a XenBus device that has been claimed by our probe routine.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_attach(device_t dev)
{
    struct xnb_softc *xnb;
    int error;
    xnb_ring_type_t i;

    error = create_netdev(dev);
    if (error != 0) {
        xenbus_dev_fatal(dev, error, "creating netdev");
        return (error);
    }

    DPRINTF("Attaching to %s\n", xenbus_get_node(dev));

    /*
     * Basic initialization.
     * After this block it is safe to call xnb_detach()
     * to clean up any allocated data for this instance.
     */
    xnb = device_get_softc(dev);
    xnb->otherend_id = xenbus_get_otherend_id(dev);
    for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
        xnb->ring_configs[i].ring_pages = 1;
    }

    /*
     * Setup sysctl variables.
     */
    xnb_setup_sysctl(xnb);

    /* Update hot-plug status to satisfy xend. */
    error = xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
                      "hotplug-status", "connected");
    if (error != 0) {
        xnb_attach_failed(xnb, error, "writing %s/hotplug-status",
                          xenbus_get_node(xnb->dev));
        return (error);
    }

    if ((error = xnb_publish_backend_info(xnb)) != 0) {
        /*
         * If we can't publish our data, we cannot participate
         * in this connection, and waiting for a front-end state
         * change will not help the situation.
         */
        xnb_attach_failed(xnb, error,
            "Publishing backend status for %s",
            xenbus_get_node(xnb->dev));
        return (error);
    }

    /* Tell the front end that we are ready to connect. */
    xenbus_set_state(dev, XenbusStateInitWait);

    return (0);
}

/**
 * Detach from a net back device instance.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 *
 * \note A net back device may be detached at any time in its life-cycle,
 *       including part way through the attach process.  For this reason,
 *       initialization order and the initialization state checks in this
 *       routine must be carefully coupled so that attach time failures
 *       are gracefully handled.
 */
static int
xnb_detach(device_t dev)
{
    struct xnb_softc *xnb;

    DPRINTF("\n");

    xnb = device_get_softc(dev);
    mtx_lock(&xnb->sc_lock);
    while (xnb_shutdown(xnb) == EAGAIN) {
        msleep(xnb, &xnb->sc_lock, /*wakeup prio unchanged*/0,
               "xnb_shutdown", 0);
    }
    mtx_unlock(&xnb->sc_lock);
    DPRINTF("\n");

    mtx_destroy(&xnb->tx_lock);
    mtx_destroy(&xnb->rx_lock);
    mtx_destroy(&xnb->sc_lock);
    return (0);
}

/**
 * Prepare this net back device for suspension of this VM.
 *
 * \param dev  NewBus device object representing this Xen net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_suspend(device_t dev)
{
    return (0);
}

/**
 * Perform any processing required to recover from a suspended state.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_resume(device_t dev)
{
    return (0);
}

/**
 * Handle state changes expressed via the XenStore by our front-end peer.
 *
 * \param dev             NewBus device object representing this Xen
 *                        Net Back instance.
 * \param frontend_state  The new state of the front-end.
 */
static void
xnb_frontend_changed(device_t dev, XenbusState frontend_state)
{
    struct xnb_softc *xnb;

    xnb = device_get_softc(dev);

    DPRINTF("frontend_state=%s, xnb_state=%s\n",
            xenbus_strstate(frontend_state),
            xenbus_strstate(xenbus_get_state(xnb->dev)));

    switch (frontend_state) {
    case XenbusStateInitialising:
        break;
    case XenbusStateInitialised:
    case XenbusStateConnected:
        xnb_connect(xnb);
        break;
    case XenbusStateClosing:
    case XenbusStateClosed:
        mtx_lock(&xnb->sc_lock);
        xnb_shutdown(xnb);
        mtx_unlock(&xnb->sc_lock);
        if (frontend_state == XenbusStateClosed)
            xenbus_set_state(xnb->dev, XenbusStateClosed);
        break;
    default:
        xenbus_dev_fatal(xnb->dev, EINVAL, "saw state %d at frontend",
                         frontend_state);
        break;
    }
}


/*---------------------------- Request Processing ----------------------------*/
/**
 * Interrupt handler bound to the shared ring's event channel.
 * Entry point for the xennet transmit path in netback.
 * Transfers packets from the Xen ring to the host's generic networking stack.
 *
 * \param arg  Callback argument registered during event channel
 *             binding - the xnb_softc for this instance.
 */
static void
xnb_intr(void *arg)
{
    struct xnb_softc *xnb;
    struct ifnet *ifp;
    netif_tx_back_ring_t *txb;
    RING_IDX req_prod_local;

    xnb = (struct xnb_softc *)arg;
    ifp = xnb->xnb_ifp;
    txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;

    mtx_lock(&xnb->tx_lock);
    do {
        int notify;
        req_prod_local = txb->sring->req_prod;
        xen_rmb();

        for (;;) {
            struct mbuf *mbufc;
            int err;

            err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp,
                           xnb->tx_gnttab);
            if (err || (mbufc == NULL))
                break;

            /* Send the packet to the generic network stack */
            (*xnb->xnb_ifp->if_input)(xnb->xnb_ifp, mbufc);
        }

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(txb, notify);
        if (notify != 0)
            xen_intr_signal(xnb->xen_intr_handle);

        txb->sring->req_event = txb->req_cons + 1;
        xen_mb();
    } while (txb->sring->req_prod != req_prod_local);
    mtx_unlock(&xnb->tx_lock);

    xnb_start(ifp);
}
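
/*
 * The do/while loop above implements the standard Xen event-coalescing
 * pattern: after draining the ring it re-arms notifications by setting
 * req_event one past the last consumed request, then re-reads req_prod.
 * A request that arrived between the drain and the re-arm is caught by the
 * loop's final comparison rather than by a (possibly already-missed) event.
 */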

/**
 * Build a struct xnb_pkt based on netif_tx_request's from a netif tx ring.
 * Will read exactly 0 or 1 packets from the ring; never a partial packet.
 *
 * \param[out] pkt      The returned packet.  If there is an error building
 *                      the packet, pkt.list_len will be set to 0.
 * \param[in]  tx_ring  Pointer to the Ring that is the input to this function.
 * \param[in]  start    The ring index of the first potential request.
 * \return  The number of requests consumed to build this packet.
 */
static int
xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring,
             RING_IDX start)
{
    /*
     * Outline:
     * 1) Initialize pkt
     * 2) Read the first request of the packet
     * 3) Read the extras
     * 4) Set cdr
     * 5) Loop on the remainder of the packet
     * 6) Finalize pkt (stuff like car_size and list_len)
     */
    int idx = start;
    int discard = 0;       /* whether to discard the packet */
    int more_data = 0;     /* there are more requests past the last one */
    uint16_t cdr_size = 0; /* accumulated size of requests 2 through n */

    xnb_pkt_initialize(pkt);

    /* Read the first request */
    if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
        netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
        pkt->size = tx->size;
        pkt->flags = tx->flags & ~NETTXF_more_data;
        more_data = tx->flags & NETTXF_more_data;
        pkt->list_len++;
        pkt->car = idx;
        idx++;
    }

    /* Read the extra info */
    if ((pkt->flags & NETTXF_extra_info) &&
        RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
        netif_extra_info_t *ext =
            (netif_extra_info_t *)RING_GET_REQUEST(tx_ring, idx);
        pkt->extra.type = ext->type;
        switch (pkt->extra.type) {
        case XEN_NETIF_EXTRA_TYPE_GSO:
            pkt->extra.u.gso = ext->u.gso;
            break;
        default:
            /*
             * The reference Linux netfront driver will
             * never set any other extra.type.  So we don't
             * know what to do with it.  Let's print an
             * error, then consume and discard the packet.
             */
            printf("xnb(%s:%d): Unknown extra info type %d."
                   "  Discarding packet\n",
                   __func__, __LINE__, pkt->extra.type);
            xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
            xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
            discard = 1;
            break;
        }

        pkt->extra.flags = ext->flags;
        if (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE) {
            /*
             * The reference linux netfront driver never sets this
             * flag (nor does any other known netfront).  So we
             * will discard the packet.
             */
            printf("xnb(%s:%d): Request sets "
                "XEN_NETIF_EXTRA_FLAG_MORE, but we can't handle "
                "that\n", __func__, __LINE__);
            xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
            xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
            discard = 1;
        }

        idx++;
    }

    /* Set cdr.  If there is not more data, cdr is invalid */
    pkt->cdr = idx;

    /* Loop on remainder of packet */
    while (more_data && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
        netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
        pkt->list_len++;
        cdr_size += tx->size;
        if (tx->flags & ~NETTXF_more_data) {
            /* There should be no other flags set at this point */
            printf("xnb(%s:%d): Request sets unknown flags %d "
                "after the 1st request in the packet.\n",
                __func__, __LINE__, tx->flags);
            xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
            xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
        }

        more_data = tx->flags & NETTXF_more_data;
        idx++;
    }

    /* Finalize packet */
    if (more_data != 0) {
        /* The ring ran out of requests before finishing the packet */
        xnb_pkt_invalidate(pkt);
        idx = start;    /* tell caller that we consumed no requests */
    } else {
        /* Calculate car_size */
        pkt->car_size = pkt->size - cdr_size;
    }
    if (discard != 0) {
        xnb_pkt_invalidate(pkt);
    }

    return (idx - start);
}


/**
 * Respond to all the requests that constituted pkt.  Builds the responses and
 * writes them to the ring, but doesn't push them to the shared ring.
 *
 * \param[in]  pkt    the packet that needs a response
 * \param[in]  error  true if there was an error handling the packet, such
 *                    as in the hypervisor copy op or mbuf allocation
 * \param[out] ring   Responses go here
 */
static void
xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring,
              int error)
{
    /*
     * Outline:
     * 1) Respond to the first request
     * 2) Respond to the extra info request
     * 3) Loop through every remaining request in the packet, generating
     *    responses that copy those requests' ids and set the status
     *    appropriately.
     */
    netif_tx_request_t *tx;
    netif_tx_response_t *rsp;
    int i;
    uint16_t status;

    status = (xnb_pkt_is_valid(pkt) == 0) || error ?
        NETIF_RSP_ERROR : NETIF_RSP_OKAY;
    KASSERT((pkt->list_len == 0) || (ring->rsp_prod_pvt == pkt->car),
        ("Cannot respond to ring requests out of order"));

    if (pkt->list_len >= 1) {
        uint16_t id;
        tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
        id = tx->id;
        rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
        rsp->id = id;
        rsp->status = status;
        ring->rsp_prod_pvt++;

        if (pkt->flags & NETTXF_extra_info) {
            rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
            rsp->status = NETIF_RSP_NULL;
            ring->rsp_prod_pvt++;
        }
    }

    for (i = 0; i < pkt->list_len - 1; i++) {
        uint16_t id;
        tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
        id = tx->id;
        rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
        rsp->id = id;
        rsp->status = status;
        ring->rsp_prod_pvt++;
    }
}
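
/*
 * Response layout sketch for xnb_txpkt2rsp(): a packet built from a data
 * request, an extra-info request, and one more data request produces three
 * ring entries, in order: a response carrying the first request's id, a
 * NETIF_RSP_NULL placeholder for the extra-info slot, and a response
 * carrying the final request's id, each data response getting status
 * NETIF_RSP_OKAY or NETIF_RSP_ERROR as appropriate.
 */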

/**
 * Create an mbuf chain to represent a packet.  Initializes all of the headers
 * in the mbuf chain, but does not copy the data.  The returned chain must be
 * m_freem()'d when no longer needed.
 *
 * \param[in] pkt  A packet to model the mbuf chain after.
 * \return  A newly allocated mbuf chain, possibly with clusters attached.
 *          NULL on failure.
 */
static struct mbuf *
xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp)
{
    /**
     * \todo consider using a memory pool for mbufs instead of
     * reallocating them for every packet
     */
    /** \todo handle extra data */
    struct mbuf *m;

    m = m_getm(NULL, pkt->size, M_NOWAIT, MT_DATA);

    if (m != NULL) {
        m->m_pkthdr.rcvif = ifp;
        if (pkt->flags & NETTXF_data_validated) {
            /*
             * We lie to the host OS and always tell it that the
             * checksums are ok, because the packet is unlikely to
             * get corrupted going across domains.
             */
            m->m_pkthdr.csum_flags = (
                CSUM_IP_CHECKED |
                CSUM_IP_VALID |
                CSUM_DATA_VALID |
                CSUM_PSEUDO_HDR
                );
            m->m_pkthdr.csum_data = 0xffff;
        }
    }
    return (m);
}
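
/*
 * Note that the chain returned above only reserves storage: the packet
 * header and checksum flags are initialized here, but the payload bytes are
 * not filled in until the grant copy built by xnb_txpkt2gnttab() is
 * executed by the hypervisor in xnb_recv().
 */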

/**
 * Build a gnttab_copy table that can be used to copy data from a pkt
 * to an mbufc.  Does not actually perform the copy.  Always uses gref's on
 * the packet side.
 *
 * \param[in]  pkt          pkt's associated requests form the src for
 *                          the copy operation
 * \param[in]  mbufc        mbufc's storage forms the dest for the copy
 *                          operation
 * \param[out] gnttab       Storage for the returned grant table
 * \param[in]  txb          Pointer to the backend ring structure
 * \param[in]  otherend_id  The domain ID of the other end of the copy
 * \return  The number of gnttab entries filled
 */
static int
xnb_txpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
                 gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb,
                 domid_t otherend_id)
{

    const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
    int gnt_idx = 0;                /* index into grant table */
    RING_IDX r_idx = pkt->car;      /* index into tx ring buffer */
    int r_ofs = 0;  /* offset of next data within tx request's data area */
    int m_ofs = 0;  /* offset of next data within mbuf's data area */
    /* size in bytes that still needs to be represented in the table */
    uint16_t size_remaining = pkt->size;

    while (size_remaining > 0) {
        const netif_tx_request_t *txq = RING_GET_REQUEST(txb, r_idx);
        const size_t mbuf_space = M_TRAILINGSPACE(mbuf) - m_ofs;
        const size_t req_size =
            r_idx == pkt->car ? pkt->car_size : txq->size;
        const size_t pkt_space = req_size - r_ofs;
        /*
         * space is the largest amount of data that can be copied in
         * the grant table's next entry
         */
        const size_t space = MIN(pkt_space, mbuf_space);

        /* TODO: handle this error condition without panicking */
        KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));

        gnttab[gnt_idx].source.u.ref = txq->gref;
        gnttab[gnt_idx].source.domid = otherend_id;
        gnttab[gnt_idx].source.offset = txq->offset + r_ofs;
        gnttab[gnt_idx].dest.u.gmfn = virt_to_mfn(
            mtod(mbuf, vm_offset_t) + m_ofs);
        gnttab[gnt_idx].dest.offset = virt_to_offset(
            mtod(mbuf, vm_offset_t) + m_ofs);
        gnttab[gnt_idx].dest.domid = DOMID_SELF;
        gnttab[gnt_idx].len = space;
        gnttab[gnt_idx].flags = GNTCOPY_source_gref;

        gnt_idx++;
        r_ofs += space;
        m_ofs += space;
        size_remaining -= space;
        if (req_size - r_ofs <= 0) {
            /* Must move to the next tx request */
            r_ofs = 0;
            r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
        }
        if (M_TRAILINGSPACE(mbuf) - m_ofs <= 0) {
            /* Must move to the next mbuf */
            m_ofs = 0;
            mbuf = mbuf->m_next;
        }
    }

    return (gnt_idx);
}

/**
 * Check the status of the grant copy operations, and update the mbuf
 * chain's various non-data fields to reflect the data present.
 *
 * \param[in,out] mbufc      mbuf chain to update.  The chain must be valid
 *                           and of the correct length, and data should
 *                           already be present
 * \param[in]     gnttab     A grant table for a just completed copy op
 * \param[in]     n_entries  The number of valid entries in the grant table
 */
static void
xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab,
                 int n_entries)
{
    struct mbuf *mbuf = mbufc;
    int i;
    size_t total_size = 0;

    for (i = 0; i < n_entries; i++) {
        KASSERT(gnttab[i].status == GNTST_okay,
            ("Some gnttab_copy entry had error status %hd\n",
            gnttab[i].status));

        mbuf->m_len += gnttab[i].len;
        total_size += gnttab[i].len;
        if (M_TRAILINGSPACE(mbuf) <= 0) {
            mbuf = mbuf->m_next;
        }
    }
    mbufc->m_pkthdr.len = total_size;

#if defined(INET) || defined(INET6)
    xnb_add_mbuf_cksum(mbufc);
#endif
}

/**
 * Dequeue at most one packet from the shared ring.
 *
 * \param[in,out] txb       Netif tx ring.  A packet will be removed from it,
 *                          and its private indices will be updated.  But the
 *                          indices will not be pushed to the shared ring.
 * \param[in]     otherend  Domain ID of the other end of the ring
 * \param[out]    mbufc     The assembled mbuf chain, ready to send to the
 *                          generic networking stack
 * \param[in]     ifnet     Interface to which the packet will be sent
 * \param[in,out] gnttab    Pointer to enough memory for a grant table.  We
 *                          make this a function parameter so that we will
 *                          take less stack space.
 * \return  An error code
 */
static int
xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc,
         struct ifnet *ifnet, gnttab_copy_table gnttab)
{
    struct xnb_pkt pkt;
    /* number of tx requests consumed to build the last packet */
    int num_consumed;
    int nr_ents;

    *mbufc = NULL;
    num_consumed = xnb_ring2pkt(&pkt, txb, txb->req_cons);
    if (num_consumed == 0)
        return (0);    /* Nothing to receive */

    /* update statistics independent of errors */
    ifnet->if_ipackets++;

    /*
     * if we got here, then 1 or more requests were consumed, but the
     * packet is not necessarily valid.
     */
    if (xnb_pkt_is_valid(&pkt) == 0) {
        /* got a garbage packet, respond and drop it */
        xnb_txpkt2rsp(&pkt, txb, 1);
        txb->req_cons += num_consumed;
        DPRINTF("xnb_intr: garbage packet, num_consumed=%d\n",
                num_consumed);
        ifnet->if_ierrors++;
        return (EINVAL);
    }

    *mbufc = xnb_pkt2mbufc(&pkt, ifnet);

    if (*mbufc == NULL) {
        /*
         * Couldn't allocate mbufs.  Respond and drop the packet.  Do
         * not consume the requests.
         */
        xnb_txpkt2rsp(&pkt, txb, 1);
        DPRINTF("xnb_intr: Couldn't allocate mbufs, num_consumed=%d\n",
            num_consumed);
        ifnet->if_iqdrops++;
        return (ENOMEM);
    }

    nr_ents = xnb_txpkt2gnttab(&pkt, *mbufc, gnttab, txb, otherend);

    if (nr_ents > 0) {
        int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
            gnttab, nr_ents);
        KASSERT(hv_ret == 0,
            ("HYPERVISOR_grant_table_op returned %d\n", hv_ret));
        xnb_update_mbufc(*mbufc, gnttab, nr_ents);
    }

    xnb_txpkt2rsp(&pkt, txb, 0);
    txb->req_cons += num_consumed;
    return (0);
}

/**
 * Create an xnb_pkt based on the contents of an mbuf chain.
 *
 * \param[in]  mbufc  mbuf chain to transform into a packet
 * \param[out] pkt    Storage for the newly generated xnb_pkt
 * \param[in]  start  The ring index of the first available slot in the rx
 *                    ring
 * \param[in]  space  The number of free slots in the rx ring
 * \retval 0       Success
 * \retval EINVAL  mbufc was corrupt or not convertible into a pkt
 * \retval EAGAIN  There was not enough space in the ring to queue the
 *                 packet
 */
static int
xnb_mbufc2pkt(const struct mbuf *mbufc, struct xnb_pkt *pkt,
              RING_IDX start, int space)
{

    int retval = 0;

    if ((mbufc == NULL) ||
        ((mbufc->m_flags & M_PKTHDR) == 0) ||
        (mbufc->m_pkthdr.len == 0)) {
        xnb_pkt_invalidate(pkt);
        retval = EINVAL;
    } else {
        int slots_required;

        xnb_pkt_validate(pkt);
        pkt->flags = 0;
        pkt->size = mbufc->m_pkthdr.len;
        pkt->car = start;
        pkt->car_size = mbufc->m_len;

        if (mbufc->m_pkthdr.csum_flags & CSUM_TSO) {
            pkt->flags |= NETRXF_extra_info;
            pkt->extra.u.gso.size = mbufc->m_pkthdr.tso_segsz;
            pkt->extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
            pkt->extra.u.gso.pad = 0;
            pkt->extra.u.gso.features = 0;
            pkt->extra.type = XEN_NETIF_EXTRA_TYPE_GSO;
            pkt->extra.flags = 0;
            pkt->cdr = start + 2;
        } else {
            pkt->cdr = start + 1;
        }
        if (mbufc->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_DELAY_DATA)) {
            pkt->flags |=
                (NETRXF_csum_blank | NETRXF_data_validated);
        }

        /*
         * Each ring response can have up to PAGE_SIZE of data.
         * Assume that we can defragment the mbuf chain efficiently
         * into responses so that each response but the last uses all
         * PAGE_SIZE bytes.
         */
        pkt->list_len = (pkt->size + PAGE_SIZE - 1) / PAGE_SIZE;

        if (pkt->list_len > 1) {
            pkt->flags |= NETRXF_more_data;
        }

        slots_required = pkt->list_len +
            (pkt->flags & NETRXF_extra_info ? 1 : 0);
        if (slots_required > space) {
            xnb_pkt_invalidate(pkt);
            retval = EAGAIN;
        }
    }

    return (retval);
}
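
/*
 * Worked example for the slot math above (illustrative sizes, 4KB pages):
 * a 6000-byte TSO chain yields list_len = (6000 + 4095) / 4096 = 2 data
 * slots, NETRXF_more_data is set, and the GSO extra-info slot raises
 * slots_required to 3; with fewer than 3 free rx slots the packet is
 * invalidated and EAGAIN is returned.
 */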
/**
 * Build a gnttab_copy table that can be used to copy data from an mbuf chain
 * to the frontend's shared buffers.  Does not actually perform the copy.
 * Always uses grefs on the other end's side.
 * \param[in]	pkt	pkt's associated responses form the dest for the copy
 * 			operation
 * \param[in]	mbufc	The source for the copy operation
 * \param[out]	gnttab	Storage for the returned grant table
 * \param[in]	rxb	Pointer to the backend ring structure
 * \param[in]	otherend_id The domain ID of the other end of the copy
 * \return	The number of gnttab entries filled
 */
static int
xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
		 gnttab_copy_table gnttab, const netif_rx_back_ring_t *rxb,
		 domid_t otherend_id)
{
	const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
	int gnt_idx = 0;		/* index into grant table */
	RING_IDX r_idx = pkt->car;	/* index into rx ring buffer */
	int r_ofs = 0;	/* offset of next data within rx request's data area */
	int m_ofs = 0;	/* offset of next data within mbuf's data area */
	/* size in bytes that still needs to be represented in the table */
	uint16_t size_remaining;

	size_remaining = (xnb_pkt_is_valid(pkt) != 0) ? pkt->size : 0;

	while (size_remaining > 0) {
		const netif_rx_request_t *rxq = RING_GET_REQUEST(rxb, r_idx);
		const size_t mbuf_space = mbuf->m_len - m_ofs;
		/* Xen shared pages have an implied size of PAGE_SIZE */
		const size_t req_size = PAGE_SIZE;
		const size_t pkt_space = req_size - r_ofs;
		/*
		 * space is the largest amount of data that can be copied in
		 * the grant table's next entry
		 */
		const size_t space = MIN(pkt_space, mbuf_space);

		/* TODO: handle this error condition without panicking */
		KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));

		gnttab[gnt_idx].dest.u.ref = rxq->gref;
		gnttab[gnt_idx].dest.domid = otherend_id;
		gnttab[gnt_idx].dest.offset = r_ofs;
		gnttab[gnt_idx].source.u.gmfn = virt_to_mfn(
			mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].source.offset = virt_to_offset(
			mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].source.domid = DOMID_SELF;
		gnttab[gnt_idx].len = space;
		gnttab[gnt_idx].flags = GNTCOPY_dest_gref;

		gnt_idx++;

		r_ofs += space;
		m_ofs += space;
		size_remaining -= space;
		if (req_size - r_ofs <= 0) {
			/* Must move to the next rx request */
			r_ofs = 0;
			r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
		}
		if (mbuf->m_len - m_ofs <= 0) {
			/* Must move to the next mbuf */
			m_ofs = 0;
			mbuf = mbuf->m_next;
		}
	}

	return gnt_idx;
}
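/*
 * Example of how the loop above chops a chain (hypothetical sizes): for a
 * 6000-byte packet stored as a 2048-byte mbuf followed by a 3952-byte mbuf,
 * with 4096-byte shared pages, the table is filled as
 *
 *	entry 0: 2048 bytes, mbuf 0 offset    0 -> slot 0 offset    0
 *	entry 1: 2048 bytes, mbuf 1 offset    0 -> slot 0 offset 2048
 *	entry 2: 1904 bytes, mbuf 1 offset 2048 -> slot 1 offset    0
 *
 * i.e., each entry is bounded by whichever runs out first: the current
 * mbuf's remaining data or the current slot's remaining PAGE_SIZE bytes.
 */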
/**
 * Generates responses for all the requests that constituted pkt.  Builds
 * responses and writes them to the ring, but doesn't push the shared ring
 * indices.
 * \param[in] pkt	the packet that needs a response
 * \param[in] gnttab	The grant copy table corresponding to this packet.
 * 			Used to determine how many netif_rx_response_t
 * 			structures to generate.
 * \param[in] n_entries	Number of relevant entries in the grant table
 * \param[out] ring	Responses go here
 * \return	The number of RX requests that were consumed to generate
 * 		the responses
 */
static int
xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab,
	      int n_entries, netif_rx_back_ring_t *ring)
{
	/*
	 * This code makes the following assumptions:
	 *	* All entries in gnttab set GNTCOPY_dest_gref
	 *	* The entries in gnttab are grouped by their grefs: any two
	 *	  entries with the same gref must be adjacent
	 */
	int error = 0;
	int gnt_idx, i;
	int n_responses = 0;
	grant_ref_t last_gref = GRANT_REF_INVALID;
	RING_IDX r_idx;

	KASSERT(gnttab != NULL, ("Received a null grant table copy"));

	/*
	 * In the event of an error, we only need to send one response to the
	 * netfront.  In that case, we mustn't write any data to the responses
	 * after the one we send.  So we must loop all the way through gnttab
	 * looking for errors before we generate any responses.
	 *
	 * Since we're looping through the grant table anyway, we'll count the
	 * number of different grefs in it, which will tell us how many
	 * responses to generate.
	 */
	for (gnt_idx = 0; gnt_idx < n_entries; gnt_idx++) {
		int16_t status = gnttab[gnt_idx].status;
		if (status != GNTST_okay) {
			DPRINTF(
			    "Got error %d for hypervisor gnttab_copy status\n",
			    status);
			error = 1;
			break;
		}
		if (gnttab[gnt_idx].dest.u.ref != last_gref) {
			n_responses++;
			last_gref = gnttab[gnt_idx].dest.u.ref;
		}
	}

	if (error != 0) {
		uint16_t id;
		netif_rx_response_t *rsp;

		id = RING_GET_REQUEST(ring, ring->rsp_prod_pvt)->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = NETIF_RSP_ERROR;
		n_responses = 1;
	} else {
		gnt_idx = 0;
		const int has_extra = pkt->flags & NETRXF_extra_info;
		if (has_extra != 0)
			n_responses++;

		for (i = 0; i < n_responses; i++) {
			netif_rx_request_t rxq;
			netif_rx_response_t *rsp;

			r_idx = ring->rsp_prod_pvt + i;
			/*
			 * We copy the structure of rxq instead of making a
			 * pointer because it shares the same memory as rsp.
			 */
			rxq = *(RING_GET_REQUEST(ring, r_idx));
			rsp = RING_GET_RESPONSE(ring, r_idx);
			if (has_extra && (i == 1)) {
				netif_extra_info_t *ext =
					(netif_extra_info_t*)rsp;
				ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
				ext->flags = 0;
				ext->u.gso.size = pkt->extra.u.gso.size;
				ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
				ext->u.gso.pad = 0;
				ext->u.gso.features = 0;
			} else {
				rsp->id = rxq.id;
				rsp->offset = 0;
				rsp->flags = 0;
				if (i < pkt->list_len - 1)
					rsp->flags |= NETRXF_more_data;
				if ((i == 0) && has_extra)
					rsp->flags |= NETRXF_extra_info;
				if ((i == 0) &&
				    (pkt->flags & NETRXF_data_validated)) {
					rsp->flags |= NETRXF_data_validated;
					rsp->flags |= NETRXF_csum_blank;
				}
				/* status doubles as the byte count */
				rsp->status = 0;
				for (; gnttab[gnt_idx].dest.u.ref == rxq.gref;
				     gnt_idx++) {
					rsp->status += gnttab[gnt_idx].len;
				}
			}
		}
	}

	ring->req_cons += n_responses;
	ring->rsp_prod_pvt += n_responses;
	return n_responses;
}
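/*
 * Sketch of the ring state produced above for a packet that spans two
 * shared pages and carries no extra info (sizes hypothetical): two distinct
 * grefs are found in gnttab, so two responses are written:
 *
 *	rsp_prod_pvt + 0: id from request 0, NETRXF_more_data, status = 4096
 *	rsp_prod_pvt + 1: id from request 1, no flags,         status = 1904
 *
 * status doubles as the byte count for successful rx responses; on any
 * gnttab error a single NETIF_RSP_ERROR response is generated instead.
 */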
#if defined(INET) || defined(INET6)
/**
 * Add IP, TCP, and/or UDP checksums to every mbuf in a chain.  The first mbuf
 * in the chain must start with a struct ether_header.
 *
 * XXX This function will perform incorrectly on UDP packets that are split up
 * into multiple ethernet frames.
 */
static void
xnb_add_mbuf_cksum(struct mbuf *mbufc)
{
	struct ether_header *eh;
	struct ip *iph;
	uint16_t ether_type;

	eh = mtod(mbufc, struct ether_header*);
	ether_type = ntohs(eh->ether_type);
	if (ether_type != ETHERTYPE_IP) {
		/* Nothing to calculate */
		return;
	}

	iph = (struct ip*)(eh + 1);
	if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
		iph->ip_sum = 0;
		iph->ip_sum = in_cksum_hdr(iph);
	}

	switch (iph->ip_p) {
	case IPPROTO_TCP:
		if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
			size_t tcplen = ntohs(iph->ip_len) - sizeof(struct ip);
			struct tcphdr *th = (struct tcphdr*)(iph + 1);
			th->th_sum = in_pseudo(iph->ip_src.s_addr,
			    iph->ip_dst.s_addr, htons(IPPROTO_TCP + tcplen));
			th->th_sum = in_cksum_skip(mbufc,
			    sizeof(struct ether_header) + ntohs(iph->ip_len),
			    sizeof(struct ether_header) + (iph->ip_hl << 2));
		}
		break;
	case IPPROTO_UDP:
		if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
			size_t udplen = ntohs(iph->ip_len) - sizeof(struct ip);
			struct udphdr *uh = (struct udphdr*)(iph + 1);
			uh->uh_sum = in_pseudo(iph->ip_src.s_addr,
			    iph->ip_dst.s_addr, htons(IPPROTO_UDP + udplen));
			uh->uh_sum = in_cksum_skip(mbufc,
			    sizeof(struct ether_header) + ntohs(iph->ip_len),
			    sizeof(struct ether_header) + (iph->ip_hl << 2));
		}
		break;
	default:
		break;
	}
}
#endif /* INET || INET6 */
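/*
 * The two-step assignment in xnb_add_mbuf_cksum() above is the usual
 * in_pseudo()/in_cksum_skip() idiom: the pseudo-header sum (source and
 * destination addresses, protocol, and transport length) is seeded into
 * th_sum/uh_sum first, and in_cksum_skip() then folds the transport header
 * and payload into that seed, skipping over the ethernet and IP headers.
 * Conceptually, for TCP:
 *
 *	th_sum = ~cksum(pseudo-header + TCP header + payload)
 */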
static void
xnb_stop(struct xnb_softc *xnb)
{
	struct ifnet *ifp;

	mtx_assert(&xnb->sc_lock, MA_OWNED);
	ifp = xnb->xnb_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
}

static int
xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct xnb_softc *xnb = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq*) data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr*)data;
#endif
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		mtx_lock(&xnb->sc_lock);
		if (ifp->if_flags & IFF_UP) {
			xnb_ifinit_locked(xnb);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xnb_stop(xnb);
			}
		}
		/*
		 * Note: netfront sets a variable named xn_if_flags
		 * here, but that variable is never read
		 */
		mtx_unlock(&xnb->sc_lock);
		break;
	case SIOCSIFADDR:
	case SIOCGIFADDR:
#ifdef INET
		mtx_lock(&xnb->sc_lock);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ifp->if_drv_flags &= ~(IFF_DRV_RUNNING |
					IFF_DRV_OACTIVE);
				if_link_state_change(ifp,
						LINK_STATE_DOWN);
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
				ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
				if_link_state_change(ifp,
						LINK_STATE_UP);
			}
			arp_ifinit(ifp, ifa);
			mtx_unlock(&xnb->sc_lock);
		} else {
			mtx_unlock(&xnb->sc_lock);
#endif
			error = ether_ioctl(ifp, cmd, data);
#ifdef INET
		}
#endif
		break;
	case SIOCSIFCAP:
		mtx_lock(&xnb->sc_lock);
		if (ifr->ifr_reqcap & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			ifp->if_hwassist |= XNB_CSUM_FEATURES;
		} else {
			ifp->if_capenable &= ~(IFCAP_TXCSUM);
			ifp->if_hwassist &= ~(XNB_CSUM_FEATURES);
		}
		if ((ifr->ifr_reqcap & IFCAP_RXCSUM)) {
			ifp->if_capenable |= IFCAP_RXCSUM;
		} else {
			ifp->if_capenable &= ~(IFCAP_RXCSUM);
		}
		/*
		 * TODO enable TSO4 and LRO once we no longer need
		 * to calculate checksums in software
		 */
#if 0
		if (ifr->ifr_reqcap & IFCAP_TSO4) {
			if (!(IFCAP_TXCSUM & ifp->if_capenable)) {
				printf("xnb: Xen netif requires that "
					"TXCSUM be enabled in order "
					"to use TSO4\n");
				error = EINVAL;
			} else {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			}
		} else {
			ifp->if_capenable &= ~(IFCAP_TSO4);
			ifp->if_hwassist &= ~(CSUM_TSO);
		}
		if (ifr->ifr_reqcap & IFCAP_LRO) {
			ifp->if_capenable |= IFCAP_LRO;
		} else {
			ifp->if_capenable &= ~(IFCAP_LRO);
		}
#endif
		mtx_unlock(&xnb->sc_lock);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		xnb_ifinit(xnb);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &xnb->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
xnb_start_locked(struct ifnet *ifp)
{
	netif_rx_back_ring_t *rxb;
	struct xnb_softc *xnb;
	struct mbuf *mbufc;
	RING_IDX req_prod_local;

	xnb = ifp->if_softc;
	rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;

	if (!xnb->carrier)
		return;

	do {
		int out_of_space = 0;
		int notify;
		req_prod_local = rxb->sring->req_prod;
		xen_rmb();
		for (;;) {
			int error;

			IF_DEQUEUE(&ifp->if_snd, mbufc);
			if (mbufc == NULL)
				break;
			error = xnb_send(rxb, xnb->otherend_id, mbufc,
					 xnb->rx_gnttab);
			switch (error) {
			case EAGAIN:
				/*
				 * Insufficient space in the ring.
				 * Requeue pkt and send when space is
				 * available.
				 */
				IF_PREPEND(&ifp->if_snd, mbufc);
				/*
				 * Perhaps the frontend missed an IRQ
				 * and went to sleep.  Notify it to wake
				 * it up.
				 */
				out_of_space = 1;
				break;

			case EINVAL:
				/* OS gave a corrupt packet.  Drop it. */
				ifp->if_oerrors++;
				/* FALLTHROUGH */
			default:
				/*
				 * Send succeeded, or packet had error.
				 * Free the packet.
				 */
				ifp->if_opackets++;
				if (mbufc)
					m_freem(mbufc);
				break;
			}
			if (out_of_space != 0)
				break;
		}

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(rxb, notify);
		if ((notify != 0) || (out_of_space != 0))
			xen_intr_signal(xnb->xen_intr_handle);
		rxb->sring->req_event = req_prod_local + 1;
		xen_mb();
	} while (rxb->sring->req_prod != req_prod_local);
}
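/*
 * A note on the do/while above: after draining if_snd we re-arm req_event
 * and then re-read req_prod.  If the frontend posted more rx buffers between
 * our first read of req_prod and the write to req_event, the re-check
 * catches them and we make another pass, rather than stalling until an
 * event that the frontend has no reason to send.
 */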
/**
 * Sends one packet to the ring.  Blocks until the packet is on the ring.
 * \param[in]	mbufc	Contains one packet to send.  Caller must free
 * \param[in,out] ring	The packet will be pushed onto this ring, but the
 * 			otherend will not be notified.
 * \param[in]	otherend The domain ID of the other end of the connection
 * \param[in,out] gnttab Pointer to enough memory for a grant table.  We make
 * 			this a function parameter so that we will take less
 * 			stack space.
 * \retval	0	Success
 * \retval	EAGAIN	The ring did not have enough space for the packet.
 * 			The ring has not been modified
 * \retval	EINVAL	mbufc was corrupt or not convertible into a pkt
 */
static int
xnb_send(netif_rx_back_ring_t *ring, domid_t otherend, const struct mbuf *mbufc,
	 gnttab_copy_table gnttab)
{
	struct xnb_pkt pkt;
	int error, n_entries, n_reqs;
	RING_IDX space;

	space = ring->sring->req_prod - ring->req_cons;
	error = xnb_mbufc2pkt(mbufc, &pkt, ring->rsp_prod_pvt, space);
	if (error != 0)
		return error;
	n_entries = xnb_rxpkt2gnttab(&pkt, mbufc, gnttab, ring, otherend);
	if (n_entries != 0) {
		int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
		    gnttab, n_entries);
		KASSERT(hv_ret == 0, ("HYPERVISOR_grant_table_op returned %d\n",
		    hv_ret));
	}

	n_reqs = xnb_rxpkt2rsp(&pkt, gnttab, n_entries, ring);

	return 0;
}

static void
xnb_start(struct ifnet *ifp)
{
	struct xnb_softc *xnb;

	xnb = ifp->if_softc;
	mtx_lock(&xnb->rx_lock);
	xnb_start_locked(ifp);
	mtx_unlock(&xnb->rx_lock);
}

/* equivalent of network_open() in Linux */
static void
xnb_ifinit_locked(struct xnb_softc *xnb)
{
	struct ifnet *ifp;

	ifp = xnb->xnb_ifp;

	mtx_assert(&xnb->sc_lock, MA_OWNED);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	xnb_stop(xnb);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);
}

static void
xnb_ifinit(void *xsc)
{
	struct xnb_softc *xnb = xsc;

	mtx_lock(&xnb->sc_lock);
	xnb_ifinit_locked(xnb);
	mtx_unlock(&xnb->sc_lock);
}

/**
 * Read the 'mac' node at the given device's node in the store, and parse that
 * as colon-separated octets, placing the result in the given mac array.  mac
 * must be a preallocated array of length ETHER_ADDR_LEN (as declared in
 * net/ethernet.h).
 * Return 0 on success, or an errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	char *s, *e, *macstr;
	const char *path;
	int error = 0;
	int i;

	path = xenbus_get_node(dev);
	error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	if (error != 0) {
		xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
	} else {
		s = macstr;
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mac[i] = strtoul(s, &e, 16);
			if (s == e || (e[0] != ':' && e[0] != 0)) {
				error = ENOENT;
				break;
			}
			s = &e[1];
		}
		free(macstr, M_XENBUS);
	}
	return error;
}
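/*
 * For reference, the xenstore node that xen_net_read_mac() parses typically
 * looks like the following (the path and address are examples only):
 *
 *	/local/domain/0/backend/vif/1/0/mac = "00:16:3e:1a:2b:3c"
 *
 * Exactly ETHER_ADDR_LEN colon-separated hex octets are expected; any other
 * layout makes the parser bail out with ENOENT.
 */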
/**
 * Callback used by the generic networking code to tell us when our carrier
 * state has changed.  Since we don't have a physical carrier, we don't care.
 */
static int
xnb_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}

/**
 * Callback used by the generic networking code to ask us what our carrier
 * state is.  Since we don't have a physical carrier, this is very simple.
 */
static void
xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/*---------------------------- NewBus Registration ---------------------------*/
static device_method_t xnb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xnb_probe),
	DEVMETHOD(device_attach,	xnb_attach),
	DEVMETHOD(device_detach,	xnb_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	xnb_suspend),
	DEVMETHOD(device_resume,	xnb_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, xnb_frontend_changed),

	{ 0, 0 }
};

static driver_t xnb_driver = {
	"xnb",
	xnb_methods,
	sizeof(struct xnb_softc),
};
devclass_t xnb_devclass;

DRIVER_MODULE(xnb, xenbusb_back, xnb_driver, xnb_devclass, 0, 0);

/*-------------------------- Unit Tests -------------------------------------*/
#ifdef XNB_DEBUG
#include "netback_unit_tests.c"
#endif