#define	virt_to_offset(x)	((x) & (PAGE_SIZE - 1))

/**
 * Predefined array type of grant table copy descriptors.  Used to pass around
 * statically allocated memory structures.
 */
typedef struct gnttab_copy gnttab_copy_table[GNTTAB_LEN];

/*--------------------------- Forward Declarations ---------------------------*/
struct xnb_softc;
struct xnb_pkt;

static void	xnb_attach_failed(struct xnb_softc *xnb,
		    int err, const char *fmt, ...)
		    __printflike(3,4);
static int	xnb_shutdown(struct xnb_softc *xnb);
static int	create_netdev(device_t dev);
static int	xnb_detach(device_t dev);
static int	xnb_ifmedia_upd(struct ifnet *ifp);
static void	xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	xnb_intr(void *arg);
static int	xnb_send(netif_rx_back_ring_t *rxb, domid_t otherend,
		    const struct mbuf *mbufc, gnttab_copy_table gnttab);
static int	xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend,
		    struct mbuf **mbufc, struct ifnet *ifnet,
		    gnttab_copy_table gnttab);
static int	xnb_ring2pkt(struct xnb_pkt *pkt,
		    const netif_tx_back_ring_t *tx_ring,
		    RING_IDX start);
static void	xnb_txpkt2rsp(const struct xnb_pkt *pkt,
		    netif_tx_back_ring_t *ring, int error);
static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp);
static int	xnb_txpkt2gnttab(const struct xnb_pkt *pkt,
		    const struct mbuf *mbufc,
		    gnttab_copy_table gnttab,
		    const netif_tx_back_ring_t *txb,
		    domid_t otherend_id);
static void	xnb_update_mbufc(struct mbuf *mbufc,
		    const gnttab_copy_table gnttab, int n_entries);
static int	xnb_mbufc2pkt(const struct mbuf *mbufc,
		    struct xnb_pkt *pkt,
		    RING_IDX start, int space);
static int	xnb_rxpkt2gnttab(const struct xnb_pkt *pkt,
		    const struct mbuf *mbufc,
		    gnttab_copy_table gnttab,
		    const netif_rx_back_ring_t *rxb,
		    domid_t otherend_id);
static int	xnb_rxpkt2rsp(const struct xnb_pkt *pkt,
		    const gnttab_copy_table gnttab, int n_entries,
		    netif_rx_back_ring_t *ring);
static void	xnb_stop(struct xnb_softc *);
static int	xnb_ioctl(struct ifnet *, u_long, caddr_t);
static void	xnb_start_locked(struct ifnet *);
static void	xnb_start(struct ifnet *);
static void	xnb_ifinit_locked(struct xnb_softc *);
static void	xnb_ifinit(void *);
#ifdef XNB_DEBUG
static int	xnb_unit_test_main(SYSCTL_HANDLER_ARGS);
static int	xnb_dump_rings(SYSCTL_HANDLER_ARGS);
#endif
#if defined(INET) || defined(INET6)
static void	xnb_add_mbuf_cksum(struct mbuf *mbufc);
#endif
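/*
 * A note on the gnttab_copy_table parameters above: in C, an array-typed
 * parameter such as "gnttab_copy_table gnttab" decays to
 * "struct gnttab_copy *", so these prototypes pass a pointer to the caller's
 * statically allocated table rather than copying GNTTAB_LEN descriptors by
 * value.  A minimal sketch of the calling convention (hypothetical caller,
 * for illustration only):
 *
 *	static gnttab_copy_table table;		// static: not on the stack
 *	n = xnb_txpkt2gnttab(&pkt, mbufc, table, txb, otherend_id);
 */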
/*------------------------------ Data Structures -----------------------------*/

/**
 * Representation of a xennet packet.  Simplified version of a packet as
 * stored in the Xen tx ring.  Applicable to both RX and TX packets.
 */
struct xnb_pkt {
	/**
	 * Array index of the first data-bearing (i.e., not extra info) entry
	 * for this packet.
	 */
	RING_IDX	car;

	/**
	 * Array index of the second data-bearing entry for this packet.
	 * Invalid if the packet has only one data-bearing entry.  If the
	 * packet has more than two data-bearing entries, then the second
	 * through the last will be sequential modulo the ring size.
	 */
	RING_IDX	cdr;

	/**
	 * Optional extra info.  Only valid if flags contains
	 * NETTXF_extra_info.  Note that extra.type will always be
	 * XEN_NETIF_EXTRA_TYPE_GSO.  Currently, no known netfront or netback
	 * driver will ever set XEN_NETIF_EXTRA_TYPE_MCAST_*.
	 */
	netif_extra_info_t extra;

	/** Size of entire packet in bytes. */
	uint16_t	size;

	/** The size of the first entry's data in bytes. */
	uint16_t	car_size;

	/**
	 * Either NETTXF_ or NETRXF_ flags.  Note that the flag values are
	 * not the same for TX and RX packets.
	 */
	uint16_t	flags;

	/**
	 * The number of valid data-bearing entries (either netif_tx_request's
	 * or netif_rx_response's) in the packet.  If this is 0, it means the
	 * entire packet is invalid.
	 */
	uint16_t	list_len;

	/** There was an error processing the packet. */
	uint8_t		error;
};

/** xnb_pkt method: initialize it */
static inline void
xnb_pkt_initialize(struct xnb_pkt *pxnb)
{
	bzero(pxnb, sizeof(*pxnb));
}

/** xnb_pkt method: mark the packet as valid */
static inline void
xnb_pkt_validate(struct xnb_pkt *pxnb)
{
	pxnb->error = 0;
}

/** xnb_pkt method: mark the packet as invalid */
static inline void
xnb_pkt_invalidate(struct xnb_pkt *pxnb)
{
	pxnb->error = 1;
}

/** xnb_pkt method: check whether the packet is valid */
static inline int
xnb_pkt_is_valid(const struct xnb_pkt *pxnb)
{
	return (!pxnb->error);
}

#ifdef XNB_DEBUG
/** xnb_pkt method: print the packet's contents in human-readable format */
static void __unused
xnb_dump_pkt(const struct xnb_pkt *pkt)
{
	if (pkt == NULL) {
		DPRINTF("Was passed a null pointer.\n");
		return;
	}
	DPRINTF("pkt address= %p\n", pkt);
	DPRINTF("pkt->size=%d\n", pkt->size);
	DPRINTF("pkt->car_size=%d\n", pkt->car_size);
	DPRINTF("pkt->flags=0x%04x\n", pkt->flags);
	DPRINTF("pkt->list_len=%d\n", pkt->list_len);
	/* DPRINTF("pkt->extra");  TODO */
	DPRINTF("pkt->car=%d\n", pkt->car);
	DPRINTF("pkt->cdr=%d\n", pkt->cdr);
	DPRINTF("pkt->error=%d\n", pkt->error);
}
#endif /* XNB_DEBUG */

static void
xnb_dump_txreq(RING_IDX idx, const struct netif_tx_request *txreq)
{
	if (txreq != NULL) {
		DPRINTF("netif_tx_request index =%u\n", idx);
		DPRINTF("netif_tx_request.gref  =%u\n", txreq->gref);
		DPRINTF("netif_tx_request.offset=%hu\n", txreq->offset);
		DPRINTF("netif_tx_request.flags =%hu\n", txreq->flags);
		DPRINTF("netif_tx_request.id    =%hu\n", txreq->id);
		DPRINTF("netif_tx_request.size  =%hu\n", txreq->size);
	}
}
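/*
 * Illustration of the car/cdr fields above (a sketch, not taken from the
 * wire): a TX packet built from three data-bearing requests starting at ring
 * index 10, with one extra info entry, lays out as
 *
 *	idx 10: first data request	-> pkt.car = 10
 *	idx 11: extra info entry	-> pkt.extra
 *	idx 12: second data request	-> pkt.cdr = 12
 *	idx 13: third data request	-> implicitly cdr + 1
 *
 * so pkt.list_len == 3 and pkt.car_size holds the byte count of the request
 * at pkt.car.
 */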
/**
 * \brief Configuration data for a shared memory request ring
 *        used to communicate with the front-end client of this
 *        driver.
 */
struct xnb_ring_config {
	/**
	 * Runtime structures for ring access.  Unfortunately, TX and RX rings
	 * use different data structures, and that cannot be changed since it
	 * is part of the interdomain protocol.
	 */
	union {
		netif_rx_back_ring_t	rx_ring;
		netif_tx_back_ring_t	tx_ring;
	} back_ring;

	/**
	 * The device bus address returned by the hypervisor when
	 * mapping the ring and required to unmap it when a connection
	 * is torn down.
	 */
	uint64_t	bus_addr;

	/** The pseudo-physical address where ring memory is mapped. */
	uint64_t	gnt_addr;

	/** KVA address where ring memory is mapped. */
	vm_offset_t	va;

	/**
	 * Grant table handles, one per-ring page, returned by the
	 * hypervisor upon mapping of the ring and required to
	 * unmap it when a connection is torn down.
	 */
	grant_handle_t	handle;

	/** The number of ring pages mapped for the current connection. */
	unsigned	ring_pages;

	/**
	 * The grant references, one per-ring page, supplied by the
	 * front-end, allowing us to reference the ring pages in the
	 * front-end's domain and to map these pages into our own domain.
	 */
	grant_ref_t	ring_ref;
};

/**
 * Per-instance connection state flags.
 */
typedef enum
{
	/** Communication with the front-end has been established. */
	XNBF_RING_CONNECTED	= 0x01,

	/**
	 * Front-end requests exist in the ring and are waiting for
	 * xnb_xen_req objects to free up.
	 */
	XNBF_RESOURCE_SHORTAGE	= 0x02,

	/** Connection teardown has started. */
	XNBF_SHUTDOWN		= 0x04,

	/** A thread is already performing shutdown processing. */
	XNBF_IN_SHUTDOWN	= 0x08
} xnb_flag_t;

/**
 * Types of rings.  Used for array indices and to identify a ring's control
 * data structure type.
 */
typedef enum {
	XNB_RING_TYPE_TX = 0,	/* ID of TX rings, used for array indices */
	XNB_RING_TYPE_RX = 1,	/* ID of RX rings, used for array indices */
	XNB_NUM_RING_TYPES
} xnb_ring_type_t;
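/*
 * The values of xnb_ring_type_t do double duty: they index the softc's
 * ring_configs[] array, and they select which member of struct
 * xnb_ring_config's back_ring union is meaningful, e.g.
 *
 *	xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring
 *	xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring
 */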
/**
 * Per-instance configuration data.
 */
struct xnb_softc {
	/** NewBus device corresponding to this instance. */
	device_t		dev;

	/* Media related fields */

	/** Generic network media state */
	struct ifmedia		sc_media;

	/** Media carrier info */
	struct ifnet		*xnb_ifp;

	/** Our own private carrier state */
	unsigned		carrier;

	/** Device MAC Address */
	uint8_t			mac[ETHER_ADDR_LEN];

	/* Xen related fields */

	/**
	 * \brief The netif protocol abi in effect.
	 *
	 * There are situations where the back and front ends can
	 * have a different, native abi (e.g. intel x86_64 and
	 * 32bit x86 domains on the same machine).  The back-end
	 * always accommodates the front-end's native abi.  That
	 * value is pulled from the XenStore and recorded here.
	 */
	int			abi;

	/**
	 * Name of the bridge to which this VIF is connected, if any.
	 * This field is dynamically allocated by xenbus and must be free()ed
	 * when no longer needed.
	 */
	char			*bridge;

	/** The interrupt driven event channel used to signal ring events. */
	evtchn_port_t		evtchn;

	/** Xen device handle. */
	long			handle;

	/** Handle to the communication ring event channel. */
	xen_intr_handle_t	xen_intr_handle;

	/**
	 * \brief Cached value of the front-end's domain id.
	 *
	 * This value is used once for each mapped page in
	 * a transaction.  We cache it to avoid incurring the
	 * cost of an ivar access every time this is needed.
	 */
	domid_t			otherend_id;

	/**
	 * Undocumented frontend feature.  Has something to do with
	 * scatter/gather IO.
	 */
	uint8_t			can_sg;
	/** Undocumented frontend feature */
	uint8_t			gso;
	/** Undocumented frontend feature */
	uint8_t			gso_prefix;
	/** Can checksum TCP/UDP over IPv4 */
	uint8_t			ip_csum;

	/* Implementation related fields */
	/**
	 * Preallocated grant table copy descriptor for RX operations.
	 * Access must be protected by rx_lock.
	 */
	gnttab_copy_table	rx_gnttab;

	/**
	 * Preallocated grant table copy descriptor for TX operations.
	 * Access must be protected by tx_lock.
	 */
	gnttab_copy_table	tx_gnttab;

	/**
	 * Resource representing allocated physical address space
	 * associated with our per-instance kva region.
	 */
	struct resource		*pseudo_phys_res;

	/** Resource id for allocated physical address space. */
	int			pseudo_phys_res_id;

	/** Ring mapping and interrupt configuration data. */
	struct xnb_ring_config	ring_configs[XNB_NUM_RING_TYPES];

	/**
	 * Global pool of kva used for mapping remote domain ring
	 * and I/O transaction data.
	 */
	vm_offset_t		kva;

	/** Pseudo-physical address corresponding to kva. */
	uint64_t		gnt_base_addr;

	/** Various configuration and state bit flags. */
	xnb_flag_t		flags;

	/** Mutex protecting per-instance data in the receive path. */
	struct mtx		rx_lock;

	/** Mutex protecting per-instance data in the softc structure. */
	struct mtx		sc_lock;

	/** Mutex protecting per-instance data in the transmit path. */
	struct mtx		tx_lock;

	/** The size of the global kva pool. */
	int			kva_size;

	/** Name of the interface */
	char			if_name[IFNAMSIZ];
};
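/*
 * Locking summary (as documented on the fields above): rx_lock guards the
 * receive path, including rx_gnttab; tx_lock guards the transmit path,
 * including tx_gnttab; sc_lock guards the remaining softc state.
 * xnb_disconnect() below relies on briefly taking and dropping rx_lock and
 * tx_lock to drain any request processing still in flight.
 */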
"sring->rsp_event", txb->sring->rsp_event, rxb->sring->rsp_event); 578 579 return (SYSCTL_OUT(req, results, strnlen(results, 720))); 580} 581 582static void __unused 583xnb_dump_mbuf(const struct mbuf *m) 584{ 585 int len; 586 uint8_t *d; 587 if (m == NULL) 588 return; 589 590 printf("xnb_dump_mbuf:\n"); 591 if (m->m_flags & M_PKTHDR) { 592 printf(" flowid=%10d, csum_flags=%#8x, csum_data=%#8x, " 593 "tso_segsz=%5hd\n", 594 m->m_pkthdr.flowid, (int)m->m_pkthdr.csum_flags, 595 m->m_pkthdr.csum_data, m->m_pkthdr.tso_segsz); 596 printf(" rcvif=%16p, len=%19d\n", 597 m->m_pkthdr.rcvif, m->m_pkthdr.len); 598 } 599 printf(" m_next=%16p, m_nextpk=%16p, m_data=%16p\n", 600 m->m_next, m->m_nextpkt, m->m_data); 601 printf(" m_len=%17d, m_flags=%#15x, m_type=%18u\n", 602 m->m_len, m->m_flags, m->m_type); 603 604 len = m->m_len; 605 d = mtod(m, uint8_t*); 606 while (len > 0) { 607 int i; 608 printf(" "); 609 for (i = 0; (i < 16) && (len > 0); i++, len--) { 610 printf("%02hhx ", *(d++)); 611 } 612 printf("\n"); 613 } 614} 615#endif /* XNB_DEBUG */ 616 617/*------------------------ Inter-Domain Communication ------------------------*/ 618/** 619 * Free dynamically allocated KVA or pseudo-physical address allocations. 620 * 621 * \param xnb Per-instance xnb configuration structure. 622 */ 623static void 624xnb_free_communication_mem(struct xnb_softc *xnb) 625{ 626 if (xnb->kva != 0) { 627 if (xnb->pseudo_phys_res != NULL) { 628 xenmem_free(xnb->dev, xnb->pseudo_phys_res_id, 629 xnb->pseudo_phys_res); 630 xnb->pseudo_phys_res = NULL; 631 } 632 } 633 xnb->kva = 0; 634 xnb->gnt_base_addr = 0; 635} 636 637/** 638 * Cleanup all inter-domain communication mechanisms. 639 * 640 * \param xnb Per-instance xnb configuration structure. 641 */ 642static int 643xnb_disconnect(struct xnb_softc *xnb) 644{ 645 struct gnttab_unmap_grant_ref gnts[XNB_NUM_RING_TYPES]; 646 int error; 647 int i; 648 649 if (xnb->xen_intr_handle != NULL) 650 xen_intr_unbind(&xnb->xen_intr_handle); 651 652 /* 653 * We may still have another thread currently processing requests. We 654 * must acquire the rx and tx locks to make sure those threads are done, 655 * but we can release those locks as soon as we acquire them, because no 656 * more interrupts will be arriving. 657 */ 658 mtx_lock(&xnb->tx_lock); 659 mtx_unlock(&xnb->tx_lock); 660 mtx_lock(&xnb->rx_lock); 661 mtx_unlock(&xnb->rx_lock); 662 663 /* Free malloc'd softc member variables */ 664 if (xnb->bridge != NULL) { 665 free(xnb->bridge, M_XENSTORE); 666 xnb->bridge = NULL; 667 } 668 669 /* All request processing has stopped, so unmap the rings */ 670 for (i=0; i < XNB_NUM_RING_TYPES; i++) { 671 gnts[i].host_addr = xnb->ring_configs[i].gnt_addr; 672 gnts[i].dev_bus_addr = xnb->ring_configs[i].bus_addr; 673 gnts[i].handle = xnb->ring_configs[i].handle; 674 } 675 error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, gnts, 676 XNB_NUM_RING_TYPES); 677 KASSERT(error == 0, ("Grant table unmap op failed (%d)", error)); 678 679 xnb_free_communication_mem(xnb); 680 /* 681 * Zero the ring config structs because the pointers, handles, and 682 * grant refs contained therein are no longer valid. 
/*------------------------ Inter-Domain Communication ------------------------*/
/**
 * Free dynamically allocated KVA or pseudo-physical address allocations.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static void
xnb_free_communication_mem(struct xnb_softc *xnb)
{
	if (xnb->kva != 0) {
		if (xnb->pseudo_phys_res != NULL) {
			xenmem_free(xnb->dev, xnb->pseudo_phys_res_id,
			    xnb->pseudo_phys_res);
			xnb->pseudo_phys_res = NULL;
		}
	}
	xnb->kva = 0;
	xnb->gnt_base_addr = 0;
}

/**
 * Cleanup all inter-domain communication mechanisms.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_disconnect(struct xnb_softc *xnb)
{
	struct gnttab_unmap_grant_ref gnts[XNB_NUM_RING_TYPES];
	int error;
	int i;

	if (xnb->xen_intr_handle != NULL)
		xen_intr_unbind(&xnb->xen_intr_handle);

	/*
	 * We may still have another thread currently processing requests.  We
	 * must acquire the rx and tx locks to make sure those threads are
	 * done, but we can release those locks as soon as we acquire them,
	 * because no more interrupts will be arriving.
	 */
	mtx_lock(&xnb->tx_lock);
	mtx_unlock(&xnb->tx_lock);
	mtx_lock(&xnb->rx_lock);
	mtx_unlock(&xnb->rx_lock);

	/* Free malloc'd softc member variables */
	if (xnb->bridge != NULL) {
		free(xnb->bridge, M_XENSTORE);
		xnb->bridge = NULL;
	}

	/* All request processing has stopped, so unmap the rings */
	for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
		gnts[i].host_addr = xnb->ring_configs[i].gnt_addr;
		gnts[i].dev_bus_addr = xnb->ring_configs[i].bus_addr;
		gnts[i].handle = xnb->ring_configs[i].handle;
	}
	error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, gnts,
	    XNB_NUM_RING_TYPES);
	KASSERT(error == 0, ("Grant table unmap op failed (%d)", error));

	xnb_free_communication_mem(xnb);
	/*
	 * Zero the ring config structs because the pointers, handles, and
	 * grant refs contained therein are no longer valid.
	 */
	bzero(&xnb->ring_configs[XNB_RING_TYPE_TX],
	    sizeof(struct xnb_ring_config));
	bzero(&xnb->ring_configs[XNB_RING_TYPE_RX],
	    sizeof(struct xnb_ring_config));

	xnb->flags &= ~XNBF_RING_CONNECTED;
	return (0);
}
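/*
 * Note that xnb_disconnect() unmaps both ring pages with a single
 * GNTTABOP_unmap_grant_ref hypercall by batching one unmap descriptor per
 * ring type in gnts[].
 */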
/**
 * Map a single shared memory ring into domain local address space and
 * initialize its control structure.
 *
 * \param xnb        Per-instance xnb configuration structure.
 * \param ring_type  Array index of this ring in the xnb's array of rings.
 * \return  An errno.
 */
static int
xnb_connect_ring(struct xnb_softc *xnb, xnb_ring_type_t ring_type)
{
	struct gnttab_map_grant_ref gnt;
	struct xnb_ring_config *ring = &xnb->ring_configs[ring_type];
	int error;

	/* TX ring type = 0, RX = 1 */
	ring->va = xnb->kva + ring_type * PAGE_SIZE;
	ring->gnt_addr = xnb->gnt_base_addr + ring_type * PAGE_SIZE;

	gnt.host_addr = ring->gnt_addr;
	gnt.flags = GNTMAP_host_map;
	gnt.ref = ring->ring_ref;
	gnt.dom = xnb->otherend_id;

	error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &gnt, 1);
	if (error != 0)
		panic("netback: Ring page grant table op failed (%d)", error);

	if (gnt.status != 0) {
		ring->va = 0;
		error = EACCES;
		xenbus_dev_fatal(xnb->dev, error,
		    "Ring shared page mapping failed. "
		    "Status %d.", gnt.status);
	} else {
		ring->handle = gnt.handle;
		ring->bus_addr = gnt.dev_bus_addr;

		if (ring_type == XNB_RING_TYPE_TX) {
			BACK_RING_INIT(&ring->back_ring.tx_ring,
			    (netif_tx_sring_t *)ring->va,
			    ring->ring_pages * PAGE_SIZE);
		} else if (ring_type == XNB_RING_TYPE_RX) {
			BACK_RING_INIT(&ring->back_ring.rx_ring,
			    (netif_rx_sring_t *)ring->va,
			    ring->ring_pages * PAGE_SIZE);
		} else {
			xenbus_dev_fatal(xnb->dev, error,
			    "Unknown ring type %d", ring_type);
		}
	}

	return (error);
}

/**
 * Set up the shared memory rings and bind an interrupt to the event channel
 * used to notify us of ring changes.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_connect_comms(struct xnb_softc *xnb)
{
	int error;
	xnb_ring_type_t i;

	if ((xnb->flags & XNBF_RING_CONNECTED) != 0)
		return (0);

	/*
	 * KVA for our rings is at the tail of the region of kva allocated
	 * by xnb_alloc_communication_mem().
	 */
	for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
		error = xnb_connect_ring(xnb, i);
		if (error != 0)
			return (error);
	}

	xnb->flags |= XNBF_RING_CONNECTED;

	error = xen_intr_bind_remote_port(xnb->dev,
	    xnb->otherend_id,
	    xnb->evtchn,
	    /*filter*/NULL,
	    xnb_intr, /*arg*/xnb,
	    INTR_TYPE_BIO | INTR_MPSAFE,
	    &xnb->xen_intr_handle);
	if (error != 0) {
		(void)xnb_disconnect(xnb);
		xenbus_dev_fatal(xnb->dev, error, "binding event channel");
		return (error);
	}

	DPRINTF("rings connected!\n");

	return (0);
}
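/*
 * With ring_pages == 1 (the value set at attach time), the communication
 * region established above is laid out as
 *
 *	xnb->kva + 0 * PAGE_SIZE: TX shared ring (XNB_RING_TYPE_TX == 0)
 *	xnb->kva + 1 * PAGE_SIZE: RX shared ring (XNB_RING_TYPE_RX == 1)
 *
 * matching the "ring->va = xnb->kva + ring_type * PAGE_SIZE" computation in
 * xnb_connect_ring().
 */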
/**
 * Size KVA and pseudo-physical address allocations based on negotiated
 * values for the size and number of I/O requests, and the size of our
 * communication ring.
 *
 * \param xnb  Per-instance xnb configuration structure.
 *
 * These address spaces are used to dynamically map pages in the
 * front-end's domain into our own.
 */
static int
xnb_alloc_communication_mem(struct xnb_softc *xnb)
{
	xnb_ring_type_t i;

	xnb->kva_size = 0;
	for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
		xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE;
	}

	/*
	 * Reserve a range of pseudo physical memory that we can map
	 * into kva.  These pages will only be backed by machine
	 * pages ("real memory") during the lifetime of front-end requests
	 * via grant table operations.  We will map the netif tx and rx rings
	 * into this space.
	 */
	xnb->pseudo_phys_res_id = 0;
	xnb->pseudo_phys_res = xenmem_alloc(xnb->dev, &xnb->pseudo_phys_res_id,
	    xnb->kva_size);
	if (xnb->pseudo_phys_res == NULL) {
		xnb->kva = 0;
		return (ENOMEM);
	}
	xnb->kva = (vm_offset_t)rman_get_virtual(xnb->pseudo_phys_res);
	xnb->gnt_base_addr = rman_get_start(xnb->pseudo_phys_res);
	return (0);
}

/**
 * Collect information from the XenStore related to our device and its
 * frontend.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_collect_xenstore_info(struct xnb_softc *xnb)
{
	/**
	 * \todo Linux collects the following info.  We should collect most
	 * of this, too:
	 * "feature-rx-notify"
	 */
	const char *otherend_path;
	const char *our_path;
	int err;
	unsigned int rx_copy, bridge_len;
	uint8_t no_csum_offload;

	otherend_path = xenbus_get_otherend_path(xnb->dev);
	our_path = xenbus_get_node(xnb->dev);

	/* Collect the critical communication parameters */
	err = xs_gather(XST_NIL, otherend_path,
	    "tx-ring-ref", "%l" PRIu32,
	    &xnb->ring_configs[XNB_RING_TYPE_TX].ring_ref,
	    "rx-ring-ref", "%l" PRIu32,
	    &xnb->ring_configs[XNB_RING_TYPE_RX].ring_ref,
	    "event-channel", "%" PRIu32, &xnb->evtchn,
	    NULL);
	if (err != 0) {
		xenbus_dev_fatal(xnb->dev, err,
		    "Unable to retrieve ring information from "
		    "frontend %s.  Unable to connect.",
		    otherend_path);
		return (err);
	}

	/* Collect the handle from xenstore */
	err = xs_scanf(XST_NIL, our_path, "handle", NULL, "%li", &xnb->handle);
	if (err != 0) {
		xenbus_dev_fatal(xnb->dev, err,
		    "Error reading handle from frontend %s.  "
		    "Unable to connect.", otherend_path);
	}

	/*
	 * Collect the bridgename, if any.  We do not need bridge_len; we just
	 * throw it away.
	 */
	err = xs_read(XST_NIL, our_path, "bridge", &bridge_len,
	    (void **)&xnb->bridge);
	if (err != 0)
		xnb->bridge = NULL;

	/*
	 * Does the frontend request that we use rx copy?  If not, return an
	 * error because this driver only supports rx copy.
	 */
	err = xs_scanf(XST_NIL, otherend_path, "request-rx-copy", NULL,
	    "%" PRIu32, &rx_copy);
	if (err == ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(xnb->dev, err, "reading %s/request-rx-copy",
		    otherend_path);
		return (err);
	}
	/**
	 * \todo: figure out the exact meaning of this feature, and when
	 * the frontend will set it to true.  It should be set to true
	 * at some point.
	 */
/*	if (!rx_copy)*/
/*		return EOPNOTSUPP;*/

	/** \todo Collect the rx notify feature */

	/* Collect the feature-sg. */
	if (xs_scanf(XST_NIL, otherend_path, "feature-sg", NULL,
	    "%hhu", &xnb->can_sg) < 0)
		xnb->can_sg = 0;

	/* Collect remaining frontend features */
	if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4", NULL,
	    "%hhu", &xnb->gso) < 0)
		xnb->gso = 0;

	if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4-prefix", NULL,
	    "%hhu", &xnb->gso_prefix) < 0)
		xnb->gso_prefix = 0;

	if (xs_scanf(XST_NIL, otherend_path, "feature-no-csum-offload", NULL,
	    "%hhu", &no_csum_offload) < 0)
		no_csum_offload = 0;
	xnb->ip_csum = (no_csum_offload == 0);

	return (0);
}
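/*
 * Summary of the XenStore nodes consumed above (relative paths; the frontend
 * nodes live under xenbus_get_otherend_path(), the backend nodes under
 * xenbus_get_node()):
 *
 *	frontend: tx-ring-ref, rx-ring-ref, event-channel, request-rx-copy,
 *	          feature-sg, feature-gso-tcpv4, feature-gso-tcpv4-prefix,
 *	          feature-no-csum-offload
 *	backend:  handle, bridge
 */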
/**
 * Supply information about the physical device to the frontend
 * via XenBus.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_publish_backend_info(struct xnb_softc *xnb)
{
	struct xs_transaction xst;
	const char *our_path;
	int error;

	our_path = xenbus_get_node(xnb->dev);

	do {
		error = xs_transaction_start(&xst);
		if (error != 0) {
			xenbus_dev_fatal(xnb->dev, error,
			    "Error publishing backend info "
			    "(start transaction)");
			break;
		}

		error = xs_printf(xst, our_path, "feature-sg",
		    "%d", XNB_SG);
		if (error != 0)
			break;

		error = xs_printf(xst, our_path, "feature-gso-tcpv4",
		    "%d", XNB_GSO_TCPV4);
		if (error != 0)
			break;

		error = xs_printf(xst, our_path, "feature-rx-copy",
		    "%d", XNB_RX_COPY);
		if (error != 0)
			break;

		error = xs_printf(xst, our_path, "feature-rx-flip",
		    "%d", XNB_RX_FLIP);
		if (error != 0)
			break;

		error = xs_transaction_end(xst, 0);
		if (error != 0 && error != EAGAIN) {
			xenbus_dev_fatal(xnb->dev, error,
			    "ending transaction");
			break;
		}

	} while (error == EAGAIN);

	return (error);
}

/**
 * Connect to our netfront peer now that it has completed publishing
 * its configuration into the XenStore.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static void
xnb_connect(struct xnb_softc *xnb)
{
	int error;

	if (xenbus_get_state(xnb->dev) == XenbusStateConnected)
		return;

	if (xnb_collect_xenstore_info(xnb) != 0)
		return;

	xnb->flags &= ~XNBF_SHUTDOWN;

	/* Read front end configuration. */

	/* Allocate resources whose size depends on front-end configuration. */
	error = xnb_alloc_communication_mem(xnb);
	if (error != 0) {
		xenbus_dev_fatal(xnb->dev, error,
		    "Unable to allocate communication memory");
		return;
	}

	/*
	 * Connect communication channel.
	 */
	error = xnb_connect_comms(xnb);
	if (error != 0) {
		/* Specific errors are reported by xnb_connect_comms(). */
		return;
	}
	xnb->carrier = 1;

	/* Ready for I/O. */
	xenbus_set_state(xnb->dev, XenbusStateConnected);
}

/*-------------------------- Device Teardown Support -------------------------*/
/**
 * Perform device shutdown functions.
 *
 * \param xnb  Per-instance xnb configuration structure.
 *
 * Mark this instance as shutting down, wait for any active requests
 * to drain, disconnect from the front-end, and notify any waiters (e.g.
 * a thread invoking our detach method) that detach can now proceed.
 */
static int
xnb_shutdown(struct xnb_softc *xnb)
{
	/*
	 * Due to the need to drop our mutex during some
	 * xenbus operations, it is possible for two threads
	 * to attempt to close out shutdown processing at
	 * the same time.  Tell the caller that hits this
	 * race to try back later.
	 */
	if ((xnb->flags & XNBF_IN_SHUTDOWN) != 0)
		return (EAGAIN);

	xnb->flags |= XNBF_SHUTDOWN;

	xnb->flags |= XNBF_IN_SHUTDOWN;

	mtx_unlock(&xnb->sc_lock);
	/* Free the network interface */
	xnb->carrier = 0;
	if (xnb->xnb_ifp != NULL) {
		ether_ifdetach(xnb->xnb_ifp);
		if_free(xnb->xnb_ifp);
		xnb->xnb_ifp = NULL;
	}
	mtx_lock(&xnb->sc_lock);

	xnb_disconnect(xnb);

	mtx_unlock(&xnb->sc_lock);
	if (xenbus_get_state(xnb->dev) < XenbusStateClosing)
		xenbus_set_state(xnb->dev, XenbusStateClosing);
	mtx_lock(&xnb->sc_lock);

	xnb->flags &= ~XNBF_IN_SHUTDOWN;

	/* Indicate to xnb_detach() that it is safe to proceed. */
	wakeup(xnb);

	return (0);
}

/**
 * Report an attach time error to the console and Xen, and cleanup
 * this instance by forcing immediate detach processing.
 *
 * \param xnb  Per-instance xnb configuration structure.
 * \param err  Errno describing the error.
 * \param fmt  Printf style format and arguments.
 */
static void
xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...)
{
	va_list ap;
	va_list ap_hotplug;

	va_start(ap, fmt);
	va_copy(ap_hotplug, ap);
	xs_vprintf(XST_NIL, xenbus_get_node(xnb->dev),
	    "hotplug-error", fmt, ap_hotplug);
	va_end(ap_hotplug);
	xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
	    "hotplug-status", "error");

	xenbus_dev_vfatal(xnb->dev, err, fmt, ap);
	va_end(ap);

	xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
	    "online", "0");
	xnb_detach(xnb->dev);
}
/*---------------------------- NewBus Entrypoints ----------------------------*/
/**
 * Inspect a XenBus device and claim it if it is of the appropriate type.
 *
 * \param dev  NewBus device object representing a candidate XenBus device.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_probe(device_t dev)
{
	if (!strcmp(xenbus_get_type(dev), "vif")) {
		DPRINTF("Claiming device %d, %s\n", device_get_unit(dev),
		    devclass_get_name(device_get_devclass(dev)));
		device_set_desc(dev, "Backend Virtual Network Device");
		device_quiet(dev);
		return (0);
	}
	return (ENXIO);
}

/**
 * Set up sysctl variables to control various Network Back parameters.
 *
 * \param xnb  Xen Net Back softc.
 */
static void
xnb_setup_sysctl(struct xnb_softc *xnb)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;

	sysctl_ctx = device_get_sysctl_ctx(xnb->dev);
	if (sysctl_ctx == NULL)
		return;

	sysctl_tree = device_get_sysctl_tree(xnb->dev);
	if (sysctl_tree == NULL)
		return;

#ifdef XNB_DEBUG
	SYSCTL_ADD_PROC(sysctl_ctx,
	    SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO,
	    "unit_test_results",
	    CTLTYPE_STRING | CTLFLAG_RD,
	    xnb,
	    0,
	    xnb_unit_test_main,
	    "A",
	    "Results of builtin unit tests");

	SYSCTL_ADD_PROC(sysctl_ctx,
	    SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO,
	    "dump_rings",
	    CTLTYPE_STRING | CTLFLAG_RD,
	    xnb,
	    0,
	    xnb_dump_rings,
	    "A",
	    "Xennet Back Rings");
#endif /* XNB_DEBUG */
}

/**
 * Create a network device.
 * \param dev  NewBus device object representing this virtual interface.
 */
static int
create_netdev(device_t dev)
{
	struct ifnet *ifp;
	struct xnb_softc *xnb;
	int err = 0;
	uint32_t handle;

	xnb = device_get_softc(dev);
	mtx_init(&xnb->sc_lock, "xnb_softc", "xen netback softc lock",
	    MTX_DEF);
	mtx_init(&xnb->tx_lock, "xnb_tx", "xen netback tx lock", MTX_DEF);
	mtx_init(&xnb->rx_lock, "xnb_rx", "xen netback rx lock", MTX_DEF);

	xnb->dev = dev;

	ifmedia_init(&xnb->sc_media, 0, xnb_ifmedia_upd, xnb_ifmedia_sts);
	ifmedia_add(&xnb->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&xnb->sc_media, IFM_ETHER|IFM_MANUAL);

	/*
	 * Set the MAC address to a dummy value (00:00:00:00:00:00).  If the
	 * MAC address of the host-facing interface were set to the same as
	 * the guest-facing one (the value found in xenstore), the bridge
	 * would stop delivering packets to us because it would see that the
	 * destination address of the packet is the same as the interface,
	 * and so the bridge would expect the packet has already been
	 * delivered locally (and just drop it).
	 */
	bzero(&xnb->mac[0], sizeof(xnb->mac));

	/*
	 * The interface will be named using the following nomenclature:
	 *
	 *	xnb<domid>.<handle>
	 *
	 * where handle is the order of the interface as referred to by the
	 * guest.
	 */
	err = xs_scanf(XST_NIL, xenbus_get_node(xnb->dev), "handle", NULL,
	    "%" PRIu32, &handle);
	if (err != 0)
		return (err);
	snprintf(xnb->if_name, IFNAMSIZ, "xnb%" PRIu16 ".%" PRIu32,
	    xenbus_get_otherend_id(dev), handle);

	if (err == 0) {
		/* Set up ifnet structure */
		ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER);
		ifp->if_softc = xnb;
		if_initname(ifp, xnb->if_name, IF_DUNIT_NONE);
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = xnb_ioctl;
		ifp->if_output = ether_output;
		ifp->if_start = xnb_start;
#ifdef notyet
		ifp->if_watchdog = xnb_watchdog;
#endif
		ifp->if_init = xnb_ifinit;
		ifp->if_mtu = ETHERMTU;
		ifp->if_snd.ifq_maxlen = NET_RX_RING_SIZE - 1;

		ifp->if_hwassist = XNB_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
		ifp->if_capenable = IFCAP_HWCSUM;

		ether_ifattach(ifp, xnb->mac);
		xnb->carrier = 0;
	}

	return (err);
}
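/*
 * Example of the naming scheme implemented above: the backend device serving
 * frontend domain 5, handle 0, attaches as interface "xnb5.0".
 */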
/**
 * Attach to a XenBus device that has been claimed by our probe routine.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_attach(device_t dev)
{
	struct xnb_softc *xnb;
	int error;
	xnb_ring_type_t i;

	error = create_netdev(dev);
	if (error != 0) {
		xenbus_dev_fatal(dev, error, "creating netdev");
		return (error);
	}

	DPRINTF("Attaching to %s\n", xenbus_get_node(dev));

	/*
	 * Basic initialization.
	 * After this block it is safe to call xnb_detach()
	 * to clean up any allocated data for this instance.
	 */
	xnb = device_get_softc(dev);
	xnb->otherend_id = xenbus_get_otherend_id(dev);
	for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
		xnb->ring_configs[i].ring_pages = 1;
	}

	/*
	 * Setup sysctl variables.
	 */
	xnb_setup_sysctl(xnb);

	/* Update hot-plug status to satisfy xend. */
	error = xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
	    "hotplug-status", "connected");
	if (error != 0) {
		xnb_attach_failed(xnb, error, "writing %s/hotplug-status",
		    xenbus_get_node(xnb->dev));
		return (error);
	}

	if ((error = xnb_publish_backend_info(xnb)) != 0) {
		/*
		 * If we can't publish our data, we cannot participate
		 * in this connection, and waiting for a front-end state
		 * change will not help the situation.
		 */
		xnb_attach_failed(xnb, error,
		    "Publishing backend status for %s",
		    xenbus_get_node(xnb->dev));
		return (error);
	}

	/* Tell the front end that we are ready to connect. */
	xenbus_set_state(dev, XenbusStateInitWait);

	return (0);
}

/**
 * Detach from a net back device instance.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 *
 * \note A net back device may be detached at any time in its life-cycle,
 *       including part way through the attach process.  For this reason,
 *       initialization order and the initialization state checks in this
 *       routine must be carefully coupled so that attach time failures
 *       are gracefully handled.
 */
static int
xnb_detach(device_t dev)
{
	struct xnb_softc *xnb;

	DPRINTF("\n");

	xnb = device_get_softc(dev);
	mtx_lock(&xnb->sc_lock);
	while (xnb_shutdown(xnb) == EAGAIN) {
		msleep(xnb, &xnb->sc_lock, /*wakeup prio unchanged*/0,
		    "xnb_shutdown", 0);
	}
	mtx_unlock(&xnb->sc_lock);
	DPRINTF("\n");

	mtx_destroy(&xnb->tx_lock);
	mtx_destroy(&xnb->rx_lock);
	mtx_destroy(&xnb->sc_lock);
	return (0);
}

/**
 * Prepare this net back device for suspension of this VM.
 *
 * \param dev  NewBus device object representing this Xen net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_suspend(device_t dev)
{
	return (0);
}

/**
 * Perform any processing required to recover from a suspended state.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_resume(device_t dev)
{
	return (0);
}
/**
 * Handle state changes expressed via the XenStore by our front-end peer.
 *
 * \param dev             NewBus device object representing this Xen
 *                        Net Back instance.
 * \param frontend_state  The new state of the front-end.
 */
static void
xnb_frontend_changed(device_t dev, XenbusState frontend_state)
{
	struct xnb_softc *xnb;

	xnb = device_get_softc(dev);

	DPRINTF("frontend_state=%s, xnb_state=%s\n",
	    xenbus_strstate(frontend_state),
	    xenbus_strstate(xenbus_get_state(xnb->dev)));

	switch (frontend_state) {
	case XenbusStateInitialising:
		break;
	case XenbusStateInitialised:
	case XenbusStateConnected:
		xnb_connect(xnb);
		break;
	case XenbusStateClosing:
	case XenbusStateClosed:
		mtx_lock(&xnb->sc_lock);
		xnb_shutdown(xnb);
		mtx_unlock(&xnb->sc_lock);
		if (frontend_state == XenbusStateClosed)
			xenbus_set_state(xnb->dev, XenbusStateClosed);
		break;
	default:
		xenbus_dev_fatal(xnb->dev, EINVAL, "saw state %d at frontend",
		    frontend_state);
		break;
	}
}

/*---------------------------- Request Processing ----------------------------*/
/**
 * Interrupt handler bound to the shared ring's event channel.
 * Entry point for the xennet transmit path in netback.
 * Transfers packets from the Xen ring to the host's generic networking stack.
 *
 * \param arg  Callback argument registered during event channel
 *             binding - the xnb_softc for this instance.
 */
static void
xnb_intr(void *arg)
{
	struct xnb_softc *xnb;
	struct ifnet *ifp;
	netif_tx_back_ring_t *txb;
	RING_IDX req_prod_local;

	xnb = (struct xnb_softc *)arg;
	ifp = xnb->xnb_ifp;
	txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;

	mtx_lock(&xnb->tx_lock);
	do {
		int notify;

		req_prod_local = txb->sring->req_prod;
		xen_rmb();

		for (;;) {
			struct mbuf *mbufc;
			int err;

			err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp,
			    xnb->tx_gnttab);
			if (err || (mbufc == NULL))
				break;

			/* Send the packet to the generic network stack */
			(*xnb->xnb_ifp->if_input)(xnb->xnb_ifp, mbufc);
		}

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(txb, notify);
		if (notify != 0)
			xen_intr_signal(xnb->xen_intr_handle);

		txb->sring->req_event = txb->req_cons + 1;
		xen_mb();
	} while (txb->sring->req_prod != req_prod_local);
	mtx_unlock(&xnb->tx_lock);

	xnb_start(ifp);
}
/**
 * Build a struct xnb_pkt based on netif_tx_request's from a netif tx ring.
 * Will read exactly 0 or 1 packets from the ring; never a partial packet.
 * \param[out] pkt      The returned packet.  If there is an error building
 *                      the packet, pkt.list_len will be set to 0.
 * \param[in]  tx_ring  Pointer to the Ring that is the input to this
 *                      function.
 * \param[in]  start    The ring index of the first potential request.
 * \return  The number of requests consumed to build this packet.
 */
static int
xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring,
    RING_IDX start)
{
	/*
	 * Outline:
	 * 1) Initialize pkt
	 * 2) Read the first request of the packet
	 * 3) Read the extras
	 * 4) Set cdr
	 * 5) Loop on the remainder of the packet
	 * 6) Finalize pkt (stuff like car_size and list_len)
	 */
	int idx = start;
	int discard = 0;	/* whether to discard the packet */
	int more_data = 0;	/* there are more requests past the last one */
	uint16_t cdr_size = 0;	/* accumulated size of requests 2 through n */

	xnb_pkt_initialize(pkt);

	/* Read the first request */
	if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);

		pkt->size = tx->size;
		pkt->flags = tx->flags & ~NETTXF_more_data;
		more_data = tx->flags & NETTXF_more_data;
		pkt->list_len++;
		pkt->car = idx;
		idx++;
	}

	/* Read the extra info */
	if ((pkt->flags & NETTXF_extra_info) &&
	    RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_extra_info_t *ext =
		    (netif_extra_info_t *)RING_GET_REQUEST(tx_ring, idx);

		pkt->extra.type = ext->type;
		switch (pkt->extra.type) {
		case XEN_NETIF_EXTRA_TYPE_GSO:
			pkt->extra.u.gso = ext->u.gso;
			break;
		default:
			/*
			 * The reference Linux netfront driver will
			 * never set any other extra.type.  So we don't
			 * know what to do with it.  Let's print an
			 * error, then consume and discard the packet.
			 */
			printf("xnb(%s:%d): Unknown extra info type %d."
			    "  Discarding packet\n",
			    __func__, __LINE__, pkt->extra.type);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring,
			    start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring,
			    idx));
			discard = 1;
			break;
		}

		pkt->extra.flags = ext->flags;
		if (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE) {
			/*
			 * The reference Linux netfront driver never sets this
			 * flag (nor does any other known netfront).  So we
			 * will discard the packet.
			 */
			printf("xnb(%s:%d): Request sets "
			    "XEN_NETIF_EXTRA_FLAG_MORE, but we can't handle "
			    "that\n", __func__, __LINE__);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
			discard = 1;
		}

		idx++;
	}

	/* Set cdr.  If there is no more data, cdr is invalid. */
	pkt->cdr = idx;

	/* Loop on remainder of packet */
	while (more_data && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);

		pkt->list_len++;
		cdr_size += tx->size;
		if (tx->flags & ~NETTXF_more_data) {
			/* There should be no other flags set at this point */
			printf("xnb(%s:%d): Request sets unknown flags %d "
			    "after the 1st request in the packet.\n",
			    __func__, __LINE__, tx->flags);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
		}

		more_data = tx->flags & NETTXF_more_data;
		idx++;
	}

	/* Finalize packet */
	if (more_data != 0) {
		/* The ring ran out of requests before finishing the packet */
		xnb_pkt_invalidate(pkt);
		idx = start;	/* tell caller that we consumed no requests */
	} else {
		/* Calculate car_size */
		pkt->car_size = pkt->size - cdr_size;
	}
	if (discard != 0) {
		xnb_pkt_invalidate(pkt);
	}

	return (idx - start);
}
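/*
 * Worked example for xnb_ring2pkt(): a 2000-byte packet sent by the frontend
 * as two requests starting at ring index 7, with no extra info, arrives as
 *
 *	idx 7: size=2000, flags=NETTXF_more_data  (first size is the total)
 *	idx 8: size=800,  flags=0
 *
 * yielding pkt.car = 7, pkt.cdr = 8, pkt.list_len = 2, pkt.size = 2000, and
 * pkt.car_size = 2000 - 800 = 1200.  The function reports two requests
 * consumed.
 */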
/**
 * Respond to all the requests that constituted pkt.  Builds the responses and
 * writes them to the ring, but doesn't push them to the shared ring.
 * \param[in]  pkt    The packet that needs a response.
 * \param[in]  error  True if there was an error handling the packet, such
 *                    as in the hypervisor copy op or mbuf allocation.
 * \param[out] ring   Responses go here.
 */
static void
xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring,
    int error)
{
	/*
	 * Outline:
	 * 1) Respond to the first request
	 * 2) Respond to the extra info request
	 * 3) Loop through every remaining request in the packet, generating
	 *    responses that copy those requests' ids and set the status
	 *    appropriately.
	 */
	netif_tx_request_t *tx;
	netif_tx_response_t *rsp;
	int i;
	uint16_t status;

	status = (xnb_pkt_is_valid(pkt) == 0) || error ?
	    NETIF_RSP_ERROR : NETIF_RSP_OKAY;
	KASSERT((pkt->list_len == 0) || (ring->rsp_prod_pvt == pkt->car),
	    ("Cannot respond to ring requests out of order"));

	if (pkt->list_len >= 1) {
		uint16_t id;

		tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
		id = tx->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = status;
		ring->rsp_prod_pvt++;

		if (pkt->flags & NETTXF_extra_info) {
			rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
			rsp->status = NETIF_RSP_NULL;
			ring->rsp_prod_pvt++;
		}
	}

	for (i = 0; i < pkt->list_len - 1; i++) {
		uint16_t id;

		tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
		id = tx->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = status;
		ring->rsp_prod_pvt++;
	}
}
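/*
 * For the example packet sketched above xnb_ring2pkt(), xnb_txpkt2rsp()
 * therefore produces pkt.list_len responses, plus one NETIF_RSP_NULL
 * response if the request carried extra info, all sharing the same status.
 */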
/**
 * Create an mbuf chain to represent a packet.  Initializes all of the headers
 * in the mbuf chain, but does not copy the data.  The returned chain must be
 * free()'d when no longer needed.
 * \param[in] pkt  A packet to model the mbuf chain after.
 * \return  A newly allocated mbuf chain, possibly with clusters attached.
 *          NULL on failure.
 */
static struct mbuf *
xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp)
{
	/**
	 * \todo consider using a memory pool for mbufs instead of
	 * reallocating them for every packet
	 */
	/** \todo handle extra data */
	struct mbuf *m;

	m = m_getm(NULL, pkt->size, M_NOWAIT, MT_DATA);

	if (m != NULL) {
		m->m_pkthdr.rcvif = ifp;
		if (pkt->flags & NETTXF_data_validated) {
			/*
			 * We lie to the host OS and always tell it that the
			 * checksums are ok, because the packet is unlikely to
			 * get corrupted going across domains.
			 */
			m->m_pkthdr.csum_flags = (
			    CSUM_IP_CHECKED |
			    CSUM_IP_VALID |
			    CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR
			    );
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	return (m);
}

/**
 * Build a gnttab_copy table that can be used to copy data from a pkt
 * to an mbufc.  Does not actually perform the copy.  Always uses gref's on
 * the packet side.
 * \param[in]  pkt          pkt's associated requests form the src for
 *                          the copy operation.
 * \param[in]  mbufc        mbufc's storage forms the dest for the copy
 *                          operation.
 * \param[out] gnttab       Storage for the returned grant table.
 * \param[in]  txb          Pointer to the backend ring structure.
 * \param[in]  otherend_id  The domain ID of the other end of the copy.
 * \return  The number of gnttab entries filled.
 */
static int
xnb_txpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
    gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb,
    domid_t otherend_id)
{
	const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
	int gnt_idx = 0;		/* index into grant table */
	RING_IDX r_idx = pkt->car;	/* index into tx ring buffer */
	int r_ofs = 0;	/* offset of next data within tx request's data area */
	int m_ofs = 0;	/* offset of next data within mbuf's data area */
	/* size in bytes that still needs to be represented in the table */
	uint16_t size_remaining = pkt->size;

	while (size_remaining > 0) {
		const netif_tx_request_t *txq = RING_GET_REQUEST(txb, r_idx);
		const size_t mbuf_space = M_TRAILINGSPACE(mbuf) - m_ofs;
		const size_t req_size =
		    r_idx == pkt->car ? pkt->car_size : txq->size;
		const size_t pkt_space = req_size - r_ofs;
		/*
		 * space is the largest amount of data that can be copied in
		 * the grant table's next entry
		 */
		const size_t space = MIN(pkt_space, mbuf_space);

		/* TODO: handle this error condition without panicking */
		KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));

		gnttab[gnt_idx].source.u.ref = txq->gref;
		gnttab[gnt_idx].source.domid = otherend_id;
		gnttab[gnt_idx].source.offset = txq->offset + r_ofs;
		gnttab[gnt_idx].dest.u.gmfn = virt_to_mfn(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].dest.offset = virt_to_offset(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].dest.domid = DOMID_SELF;
		gnttab[gnt_idx].len = space;
		gnttab[gnt_idx].flags = GNTCOPY_source_gref;

		gnt_idx++;
		r_ofs += space;
		m_ofs += space;
		size_remaining -= space;
		if (req_size - r_ofs <= 0) {
			/* Must move to the next tx request */
			r_ofs = 0;
			r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
		}
		if (M_TRAILINGSPACE(mbuf) - m_ofs <= 0) {
			/* Must move to the next mbuf */
			m_ofs = 0;
			mbuf = mbuf->m_next;
		}
	}

	return (gnt_idx);
}
/**
 * Check the status of the grant copy operations, and update the mbuf chain's
 * various non-data fields to reflect the data present.
 * \param[in,out] mbufc      mbuf chain to update.  The chain must be valid
 *                           and of the correct length, and data should
 *                           already be present.
 * \param[in]     gnttab     A grant table for a just completed copy op.
 * \param[in]     n_entries  The number of valid entries in the grant table.
 */
static void
xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab,
    int n_entries)
{
	struct mbuf *mbuf = mbufc;
	int i;
	size_t total_size = 0;

	for (i = 0; i < n_entries; i++) {
		KASSERT(gnttab[i].status == GNTST_okay,
		    ("Some gnttab_copy entry had error status %hd\n",
		    gnttab[i].status));

		mbuf->m_len += gnttab[i].len;
		total_size += gnttab[i].len;
		if (M_TRAILINGSPACE(mbuf) <= 0) {
			mbuf = mbuf->m_next;
		}
	}
	mbufc->m_pkthdr.len = total_size;

#if defined(INET) || defined(INET6)
	xnb_add_mbuf_cksum(mbufc);
#endif
}

/**
 * Dequeue at most one packet from the shared ring.
 * \param[in,out] txb       Netif tx ring.  A packet will be removed from it,
 *                          and its private indices will be updated.  But the
 *                          indices will not be pushed to the shared ring.
 * \param[in]     otherend  Domain ID of the other end of the ring.
 * \param[out]    mbufc     The assembled mbuf chain, ready to send to the
 *                          generic networking stack.
 * \param[in]     ifnet     Interface to which the packet will be sent.
 * \param[in,out] gnttab    Pointer to enough memory for a grant table.  We
 *                          make this a function parameter so that we will
 *                          take less stack space.
 * \return  An error code.
 */
static int
xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc,
    struct ifnet *ifnet, gnttab_copy_table gnttab)
{
	struct xnb_pkt pkt;
	/* number of tx requests consumed to build the last packet */
	int num_consumed;
	int nr_ents;

	*mbufc = NULL;
	num_consumed = xnb_ring2pkt(&pkt, txb, txb->req_cons);
	if (num_consumed == 0)
		return (0);	/* Nothing to receive */

	/* update statistics independent of errors */
	if_inc_counter(ifnet, IFCOUNTER_IPACKETS, 1);

	/*
	 * if we got here, then 1 or more requests were consumed, but the
	 * packet is not necessarily valid.
	 */
	if (xnb_pkt_is_valid(&pkt) == 0) {
		/* got a garbage packet, respond and drop it */
		xnb_txpkt2rsp(&pkt, txb, 1);
		txb->req_cons += num_consumed;
		DPRINTF("xnb_intr: garbage packet, num_consumed=%d\n",
		    num_consumed);
		if_inc_counter(ifnet, IFCOUNTER_IERRORS, 1);
		return (EINVAL);
	}

	*mbufc = xnb_pkt2mbufc(&pkt, ifnet);

	if (*mbufc == NULL) {
		/*
		 * Couldn't allocate mbufs.  Respond and drop the packet.  Do
		 * not consume the requests.
		 */
		xnb_txpkt2rsp(&pkt, txb, 1);
		DPRINTF("xnb_intr: Couldn't allocate mbufs, num_consumed=%d\n",
		    num_consumed);
		if_inc_counter(ifnet, IFCOUNTER_IQDROPS, 1);
		return (ENOMEM);
	}

	nr_ents = xnb_txpkt2gnttab(&pkt, *mbufc, gnttab, txb, otherend);

	if (nr_ents > 0) {
		int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
		    gnttab, nr_ents);

		KASSERT(hv_ret == 0,
		    ("HYPERVISOR_grant_table_op returned %d\n", hv_ret));
		xnb_update_mbufc(*mbufc, gnttab, nr_ents);
	}

	xnb_txpkt2rsp(&pkt, txb, 0);
	txb->req_cons += num_consumed;
	return (0);
}
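/*
 * To recap the transmit path driven from xnb_intr(): xnb_recv() turns ring
 * requests into an xnb_pkt (xnb_ring2pkt), allocates an mbuf chain for it
 * (xnb_pkt2mbufc), copies the payload across domains with a single
 * GNTTABOP_copy hypercall (xnb_txpkt2gnttab), fixes up the chain's
 * bookkeeping fields (xnb_update_mbufc), and finally queues responses
 * (xnb_txpkt2rsp).
 */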
/**
 * Create an xnb_pkt based on the contents of an mbuf chain.
 * \param[in]  mbufc  mbuf chain to transform into a packet.
 * \param[out] pkt    Storage for the newly generated xnb_pkt.
 * \param[in]  start  The ring index of the first available slot in the rx
 *                    ring.
 * \param[in]  space  The number of free slots in the rx ring.
 * \retval 0       Success.
 * \retval EINVAL  mbufc was corrupt or not convertible into a pkt.
 * \retval EAGAIN  There was not enough space in the ring to queue the
 *                 packet.
 */
static int
xnb_mbufc2pkt(const struct mbuf *mbufc, struct xnb_pkt *pkt,
    RING_IDX start, int space)
{
	int retval = 0;

	if ((mbufc == NULL) ||
	    ((mbufc->m_flags & M_PKTHDR) == 0) ||
	    (mbufc->m_pkthdr.len == 0)) {
		xnb_pkt_invalidate(pkt);
		retval = EINVAL;
	} else {
		int slots_required;

		xnb_pkt_validate(pkt);
		pkt->flags = 0;
		pkt->size = mbufc->m_pkthdr.len;
		pkt->car = start;
		pkt->car_size = mbufc->m_len;

		if (mbufc->m_pkthdr.csum_flags & CSUM_TSO) {
			pkt->flags |= NETRXF_extra_info;
			pkt->extra.u.gso.size = mbufc->m_pkthdr.tso_segsz;
			pkt->extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			pkt->extra.u.gso.pad = 0;
			pkt->extra.u.gso.features = 0;
			pkt->extra.type = XEN_NETIF_EXTRA_TYPE_GSO;
			pkt->extra.flags = 0;
			pkt->cdr = start + 2;
		} else {
			pkt->cdr = start + 1;
		}
		if (mbufc->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_DELAY_DATA)) {
			pkt->flags |=
			    (NETRXF_csum_blank | NETRXF_data_validated);
		}

		/*
		 * Each ring response can have up to PAGE_SIZE of data.
		 * Assume that we can defragment the mbuf chain efficiently
		 * into responses so that each response but the last uses all
		 * PAGE_SIZE bytes.
		 */
		pkt->list_len = (pkt->size + PAGE_SIZE - 1) / PAGE_SIZE;

		if (pkt->list_len > 1) {
			pkt->flags |= NETRXF_more_data;
		}

		slots_required = pkt->list_len +
		    (pkt->flags & NETRXF_extra_info ? 1 : 0);
		if (slots_required > space) {
			xnb_pkt_invalidate(pkt);
			retval = EAGAIN;
		}
	}

	return (retval);
}
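/*
 * Worked example for xnb_mbufc2pkt(), assuming PAGE_SIZE is 4096: a
 * 6000-byte TSO mbuf chain maps to list_len = (6000 + 4095) / 4096 = 2
 * data-bearing responses plus one extra info slot, so three free ring slots
 * are required, and both NETRXF_more_data and NETRXF_extra_info are set.
 */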
/**
 * Build a gnttab_copy table that can be used to copy data from an mbuf chain
 * to the frontend's shared buffers.  Does not actually perform the copy.
 * Always uses gref's on the other end's side.
 * \param[in]  pkt          pkt's associated responses form the dest for the
 *                          copy operation.
 * \param[in]  mbufc        The source for the copy operation.
 * \param[out] gnttab       Storage for the returned grant table.
 * \param[in]  rxb          Pointer to the backend ring structure.
 * \param[in]  otherend_id  The domain ID of the other end of the copy.
 * \return  The number of gnttab entries filled.
 */
static int
xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
    gnttab_copy_table gnttab, const netif_rx_back_ring_t *rxb,
    domid_t otherend_id)
{
	const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
	int gnt_idx = 0;		/* index into grant table */
	RING_IDX r_idx = pkt->car;	/* index into rx ring buffer */
	int r_ofs = 0;	/* offset of next data within rx request's data area */
	int m_ofs = 0;	/* offset of next data within mbuf's data area */
	/* size in bytes that still needs to be represented in the table */
	uint16_t size_remaining;

	size_remaining = (xnb_pkt_is_valid(pkt) != 0) ? pkt->size : 0;

	while (size_remaining > 0) {
		const netif_rx_request_t *rxq = RING_GET_REQUEST(rxb, r_idx);
		const size_t mbuf_space = mbuf->m_len - m_ofs;
		/* Xen shared pages have an implied size of PAGE_SIZE */
		const size_t req_size = PAGE_SIZE;
		const size_t pkt_space = req_size - r_ofs;
		/*
		 * space is the largest amount of data that can be copied in
		 * the grant table's next entry
		 */
		const size_t space = MIN(pkt_space, mbuf_space);

		/* TODO: handle this error condition without panicking */
		KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));

		gnttab[gnt_idx].dest.u.ref = rxq->gref;
		gnttab[gnt_idx].dest.domid = otherend_id;
		gnttab[gnt_idx].dest.offset = r_ofs;
		gnttab[gnt_idx].source.u.gmfn = virt_to_mfn(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].source.offset = virt_to_offset(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].source.domid = DOMID_SELF;
		gnttab[gnt_idx].len = space;
		gnttab[gnt_idx].flags = GNTCOPY_dest_gref;

		gnt_idx++;

		r_ofs += space;
		m_ofs += space;
		size_remaining -= space;
		if (req_size - r_ofs <= 0) {
			/* Must move to the next rx request */
			r_ofs = 0;
			r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
		}
		if (mbuf->m_len - m_ofs <= 0) {
			/* Must move to the next mbuf */
			m_ofs = 0;
			mbuf = mbuf->m_next;
		}
	}

	return (gnt_idx);
}
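/*
 * Because every RX destination buffer is a full page, a copy entry never
 * spans a page boundary on the frontend side; the 6000-byte chain from the
 * example above therefore needs at least two entries (4096 + 1904 bytes),
 * and more if the source mbufs are fragmented.
 */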
2025/**
2026 * Generates responses for all the requests that constituted pkt. Builds
2027 * responses and writes them to the ring, but doesn't push the shared ring
2028 * indices.
2029 * \param[in] pkt the packet that needs a response
2030 * \param[in] gnttab The grant copy table corresponding to this packet.
2031 * Used to determine how many rsp->netif_rx_response_t's to
2032 * generate.
2033 * \param[in] n_entries Number of relevant entries in the grant table
2034 * \param[out] ring Responses go here
2035 * \return The number of RX requests that were consumed to generate
2036 * the responses
2037 */
2038static int
2039xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab,
2040 int n_entries, netif_rx_back_ring_t *ring)
2041{
2042 /*
2043 * This code makes the following assumptions:
2044 * * All entries in gnttab set GNTCOPY_dest_gref
2045 * * The entries in gnttab are grouped by their grefs: any two
2046 * entries with the same gref must be adjacent
2047 */
2048 int error = 0;
2049 int gnt_idx, i;
2050 int n_responses = 0;
2051 grant_ref_t last_gref = GRANT_REF_INVALID;
2052 RING_IDX r_idx;
2053
2054 KASSERT(gnttab != NULL, ("Received a null gnttab_copy table"));
2055
2056 /*
2057 * In the event of an error, we only need to send one response to the
2058 * netfront. In that case, we mustn't write any data to the responses
2059 * after the one we send. So we must loop all the way through gnttab
2060 * looking for errors before we generate any responses
2061 *
2062 * Since we're looping through the grant table anyway, we'll count the
2063 * number of different gref's in it, which will tell us how many
2064 * responses to generate
2065 */
2066 for (gnt_idx = 0; gnt_idx < n_entries; gnt_idx++) {
2067 int16_t status = gnttab[gnt_idx].status;
2068 if (status != GNTST_okay) {
2069 DPRINTF(
2070 "Got error %d for hypervisor gnttab_copy status\n",
2071 status);
2072 error = 1;
2073 break;
2074 }
2075 if (gnttab[gnt_idx].dest.u.ref != last_gref) {
2076 n_responses++;
2077 last_gref = gnttab[gnt_idx].dest.u.ref;
2078 }
2079 }
2080
2081 if (error != 0) {
2082 uint16_t id;
2083 netif_rx_response_t *rsp;
2084
2085 id = RING_GET_REQUEST(ring, ring->rsp_prod_pvt)->id;
2086 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
2087 rsp->id = id;
2088 rsp->status = NETIF_RSP_ERROR;
2089 n_responses = 1;
2090 } else {
2091 gnt_idx = 0;
2092 const int has_extra = pkt->flags & NETRXF_extra_info;
2093 if (has_extra != 0)
2094 n_responses++;
2095
2096 for (i = 0; i < n_responses; i++) {
2097 netif_rx_request_t rxq;
2098 netif_rx_response_t *rsp;
2099
2100 r_idx = ring->rsp_prod_pvt + i;
2101 /*
2102 * We copy the structure of rxq instead of making a
2103 * pointer because it shares the same memory as rsp.
2104 */
2105 rxq = *(RING_GET_REQUEST(ring, r_idx));
2106 rsp = RING_GET_RESPONSE(ring, r_idx);
2107 if (has_extra && (i == 1)) {
2108 netif_extra_info_t *ext =
2109 (netif_extra_info_t*)rsp;
2110 ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
2111 ext->flags = 0;
2112 ext->u.gso.size = pkt->extra.u.gso.size;
2113 ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
2114 ext->u.gso.pad = 0;
2115 ext->u.gso.features = 0;
2116 } else {
2117 rsp->id = rxq.id;
2118 rsp->status = GNTST_okay;
2119 rsp->offset = 0;
2120 rsp->flags = 0;
2121 if (i < pkt->list_len - 1)
2122 rsp->flags |= NETRXF_more_data;
2123 if ((i == 0) && has_extra)
2124 rsp->flags |= NETRXF_extra_info;
2125 if ((i == 0) &&
2126 (pkt->flags & NETRXF_data_validated)) {
2127 rsp->flags |= NETRXF_data_validated;
2128 rsp->flags |= NETRXF_csum_blank;
2129 }
2130 rsp->status = 0;
2131 for (; gnttab[gnt_idx].dest.u.ref == rxq.gref;
2132 gnt_idx++) {
2133 rsp->status += gnttab[gnt_idx].len;
2134 }
2135 }
2136 }
2137 }
2138
2139 ring->req_cons += n_responses;
2140 ring->rsp_prod_pvt += n_responses;
2141 return n_responses;
2142}
2143
2144#if defined(INET) || defined(INET6)
2145/**
2146 * Add IP, TCP, and/or UDP checksums to every mbuf in a chain.
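 * (A note on the TCP and UDP cases below: they use the standard two-step
 * software method. The checksum field is first seeded with the
 * pseudo-header sum, roughly
 *	th->th_sum = in_pseudo(ip_src, ip_dst, htons(IPPROTO_TCP + tcplen));
 * and in_cksum_skip() then folds that seed together with the rest of the
 * frame into the final checksum. The line above paraphrases the code
 * below; it is not a separate API.)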
The first mbuf 2147 * in the chain must start with a struct ether_header. 2148 * 2149 * XXX This function will perform incorrectly on UDP packets that are split up 2150 * into multiple ethernet frames. 2151 */ 2152static void 2153xnb_add_mbuf_cksum(struct mbuf *mbufc) 2154{ 2155 struct ether_header *eh; 2156 struct ip *iph; 2157 uint16_t ether_type; 2158 2159 eh = mtod(mbufc, struct ether_header*); 2160 ether_type = ntohs(eh->ether_type); 2161 if (ether_type != ETHERTYPE_IP) { 2162 /* Nothing to calculate */ 2163 return; 2164 } 2165 2166 iph = (struct ip*)(eh + 1); 2167 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { 2168 iph->ip_sum = 0; 2169 iph->ip_sum = in_cksum_hdr(iph); 2170 } 2171 2172 switch (iph->ip_p) { 2173 case IPPROTO_TCP: 2174 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { 2175 size_t tcplen = ntohs(iph->ip_len) - sizeof(struct ip); 2176 struct tcphdr *th = (struct tcphdr*)(iph + 1); 2177 th->th_sum = in_pseudo(iph->ip_src.s_addr, 2178 iph->ip_dst.s_addr, htons(IPPROTO_TCP + tcplen)); 2179 th->th_sum = in_cksum_skip(mbufc, 2180 sizeof(struct ether_header) + ntohs(iph->ip_len), 2181 sizeof(struct ether_header) + (iph->ip_hl << 2)); 2182 } 2183 break; 2184 case IPPROTO_UDP: 2185 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { 2186 size_t udplen = ntohs(iph->ip_len) - sizeof(struct ip); 2187 struct udphdr *uh = (struct udphdr*)(iph + 1); 2188 uh->uh_sum = in_pseudo(iph->ip_src.s_addr, 2189 iph->ip_dst.s_addr, htons(IPPROTO_UDP + udplen)); 2190 uh->uh_sum = in_cksum_skip(mbufc, 2191 sizeof(struct ether_header) + ntohs(iph->ip_len), 2192 sizeof(struct ether_header) + (iph->ip_hl << 2)); 2193 } 2194 break; 2195 default: 2196 break; 2197 } 2198} 2199#endif /* INET || INET6 */ 2200 2201static void 2202xnb_stop(struct xnb_softc *xnb) 2203{ 2204 struct ifnet *ifp; 2205 2206 mtx_assert(&xnb->sc_lock, MA_OWNED); 2207 ifp = xnb->xnb_ifp; 2208 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2209 if_link_state_change(ifp, LINK_STATE_DOWN); 2210} 2211 2212static int 2213xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2214{ 2215 struct xnb_softc *xnb = ifp->if_softc; 2216 struct ifreq *ifr = (struct ifreq*) data; 2217#ifdef INET 2218 struct ifaddr *ifa = (struct ifaddr*)data; 2219#endif 2220 int error = 0; 2221 2222 switch (cmd) { 2223 case SIOCSIFFLAGS: 2224 mtx_lock(&xnb->sc_lock); 2225 if (ifp->if_flags & IFF_UP) { 2226 xnb_ifinit_locked(xnb); 2227 } else { 2228 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2229 xnb_stop(xnb); 2230 } 2231 } 2232 /* 2233 * Note: netfront sets a variable named xn_if_flags 2234 * here, but that variable is never read 2235 */ 2236 mtx_unlock(&xnb->sc_lock); 2237 break; 2238 case SIOCSIFADDR: 2239#ifdef INET 2240 mtx_lock(&xnb->sc_lock); 2241 if (ifa->ifa_addr->sa_family == AF_INET) { 2242 ifp->if_flags |= IFF_UP; 2243 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2244 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | 2245 IFF_DRV_OACTIVE); 2246 if_link_state_change(ifp, 2247 LINK_STATE_DOWN); 2248 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2249 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2250 if_link_state_change(ifp, 2251 LINK_STATE_UP); 2252 } 2253 arp_ifinit(ifp, ifa); 2254 mtx_unlock(&xnb->sc_lock); 2255 } else { 2256 mtx_unlock(&xnb->sc_lock); 2257#endif 2258 error = ether_ioctl(ifp, cmd, data); 2259#ifdef INET 2260 } 2261#endif 2262 break; 2263 case SIOCSIFCAP: 2264 mtx_lock(&xnb->sc_lock); 2265 if (ifr->ifr_reqcap & IFCAP_TXCSUM) { 2266 ifp->if_capenable |= IFCAP_TXCSUM; 2267 ifp->if_hwassist |= XNB_CSUM_FEATURES; 2268 } else { 2269 ifp->if_capenable 
&= ~(IFCAP_TXCSUM);
2270 ifp->if_hwassist &= ~(XNB_CSUM_FEATURES);
2271 }
2272 if ((ifr->ifr_reqcap & IFCAP_RXCSUM)) {
2273 ifp->if_capenable |= IFCAP_RXCSUM;
2274 } else {
2275 ifp->if_capenable &= ~(IFCAP_RXCSUM);
2276 }
2277 /*
2278 * TODO enable TSO4 and LRO once we no longer need
2279 * to calculate checksums in software
2280 */
2281#if 0
2282 if (ifr->ifr_reqcap & IFCAP_TSO4) {
2283 if (!(IFCAP_TXCSUM & ifp->if_capenable)) {
2284 printf("xnb: Xen netif requires that "
2285 "TXCSUM be enabled in order "
2286 "to use TSO4\n");
2287 error = EINVAL;
2288 } else {
2289 ifp->if_capenable |= IFCAP_TSO4;
2290 ifp->if_hwassist |= CSUM_TSO;
2291 }
2292 } else {
2293 ifp->if_capenable &= ~(IFCAP_TSO4);
2294 ifp->if_hwassist &= ~(CSUM_TSO);
2295 }
2296 if (ifr->ifr_reqcap & IFCAP_LRO) {
2297 ifp->if_capenable |= IFCAP_LRO;
2298 } else {
2299 ifp->if_capenable &= ~(IFCAP_LRO);
2300 }
2301#endif
2302 mtx_unlock(&xnb->sc_lock);
2303 break;
2304 case SIOCSIFMTU:
2305 ifp->if_mtu = ifr->ifr_mtu;
2306 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2307 xnb_ifinit(xnb);
2308 break;
2309 case SIOCADDMULTI:
2310 case SIOCDELMULTI:
2311 case SIOCSIFMEDIA:
2312 case SIOCGIFMEDIA:
2313 error = ifmedia_ioctl(ifp, ifr, &xnb->sc_media, cmd);
2314 break;
2315 default:
2316 error = ether_ioctl(ifp, cmd, data);
2317 break;
2318 }
2319 return (error);
2320}
2321
2322static void
2323xnb_start_locked(struct ifnet *ifp)
2324{
2325 netif_rx_back_ring_t *rxb;
2326 struct xnb_softc *xnb;
2327 struct mbuf *mbufc;
2328 RING_IDX req_prod_local;
2329
2330 xnb = ifp->if_softc;
2331 rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;
2332
2333 if (!xnb->carrier)
2334 return;
2335
2336 do {
2337 int out_of_space = 0;
2338 int notify;
2339 req_prod_local = rxb->sring->req_prod;
2340 xen_rmb();
2341 for (;;) {
2342 int error;
2343
2344 IF_DEQUEUE(&ifp->if_snd, mbufc);
2345 if (mbufc == NULL)
2346 break;
2347 error = xnb_send(rxb, xnb->otherend_id, mbufc,
2348 xnb->rx_gnttab);
2349 switch (error) {
2350 case EAGAIN:
2351 /*
2352 * Insufficient space in the ring.
2353 * Requeue pkt and send when space is
2354 * available.
2355 */
2356 IF_PREPEND(&ifp->if_snd, mbufc);
2357 /*
2358 * Perhaps the frontend missed an IRQ
2359 * and went to sleep. Notify it to wake
2360 * it up.
2361 */
2362 out_of_space = 1;
2363 break;
2364
2365 case EINVAL:
2366 /* OS gave a corrupt packet. Drop it. */
2367 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2368 /* FALLTHROUGH */
2369 default:
2370 /* Send succeeded, or packet had error.
2371 * Free the packet */
2372 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2373 if (mbufc)
2374 m_freem(mbufc);
2375 break;
2376 }
2377 if (out_of_space != 0)
2378 break;
2379 }
2380
2381 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(rxb, notify);
2382 if ((notify != 0) || (out_of_space != 0))
2383 xen_intr_signal(xnb->xen_intr_handle);
2384 rxb->sring->req_event = req_prod_local + 1;
2385 xen_mb();
2386 } while (rxb->sring->req_prod != req_prod_local);
2387}
2388
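/*
 * The do/while loop in xnb_start_locked() above is the standard Xen
 * shared-ring consumer pattern (a sketch; locking and memory barriers
 * elided):
 *
 *	do {
 *		prod = sring->req_prod;        // snapshot the producer index
 *		// ...dequeue and send mbufs up to prod...
 *		sring->req_event = prod + 1;   // re-arm frontend notification
 *	} while (sring->req_prod != prod);     // catch late-arriving requests
 *
 * Without the final re-check, a request that the frontend queued between
 * the snapshot and the req_event store could sit unprocessed until the
 * next event channel interrupt.
 */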
2389/**
2390 * Sends one packet to the ring. Blocks until the packet is on the ring
2391 * \param[in] mbufc Contains one packet to send. Caller must free
2392 * \param[in,out] ring The packet will be pushed onto this ring, but the
2393 * otherend will not be notified.
2394 * \param[in] otherend The domain ID of the other end of the connection
2395 * \param[in,out] gnttab Pointer to enough memory for a grant table. We make
2396 * this a function parameter so that we will take less
2397 * stack space.
2398 * \retval EAGAIN The ring did not have enough space for the packet.
2399 * The ring has not been modified
2400 * \retval EINVAL mbufc was corrupt or not convertible into a pkt
2401 */
2402static int
2403xnb_send(netif_rx_back_ring_t *ring, domid_t otherend, const struct mbuf *mbufc,
2404 gnttab_copy_table gnttab)
2405{
2406 struct xnb_pkt pkt;
2407 int error, n_entries, n_reqs __unused;
2408 RING_IDX space;
2409
2410 space = ring->sring->req_prod - ring->req_cons;
2411 error = xnb_mbufc2pkt(mbufc, &pkt, ring->rsp_prod_pvt, space);
2412 if (error != 0)
2413 return error;
2414 n_entries = xnb_rxpkt2gnttab(&pkt, mbufc, gnttab, ring, otherend);
2415 if (n_entries != 0) {
2416 int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
2417 gnttab, n_entries);
2418 KASSERT(hv_ret == 0, ("HYPERVISOR_grant_table_op returned %d\n",
2419 hv_ret));
2420 }
2421
2422 n_reqs = xnb_rxpkt2rsp(&pkt, gnttab, n_entries, ring);
2423
2424 return 0;
2425}
2426
2427static void
2428xnb_start(struct ifnet *ifp)
2429{
2430 struct xnb_softc *xnb;
2431
2432 xnb = ifp->if_softc;
2433 mtx_lock(&xnb->rx_lock);
2434 xnb_start_locked(ifp);
2435 mtx_unlock(&xnb->rx_lock);
2436}
2437
2438/* equivalent of network_open() in Linux */
2439static void
2440xnb_ifinit_locked(struct xnb_softc *xnb)
2441{
2442 struct ifnet *ifp;
2443
2444 ifp = xnb->xnb_ifp;
2445
2446 mtx_assert(&xnb->sc_lock, MA_OWNED);
2447
2448 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2449 return;
2450
2451 xnb_stop(xnb);
2452
2453 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2454 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2455 if_link_state_change(ifp, LINK_STATE_UP);
2456}
2457
2458
2459static void
2460xnb_ifinit(void *xsc)
2461{
2462 struct xnb_softc *xnb = xsc;
2463
2464 mtx_lock(&xnb->sc_lock);
2465 xnb_ifinit_locked(xnb);
2466 mtx_unlock(&xnb->sc_lock);
2467}
2468
2469/**
2470 * Callback used by the generic networking code when the user sets a new
2471 * media configuration. Since we don't have a physical carrier, we don't care
2472 */
2473static int
2474xnb_ifmedia_upd(struct ifnet *ifp)
2475{
2476 return (0);
2477}
2478
2479/**
2480 * Callback used by the generic networking code to ask us what our carrier
2481 * state is. Since we don't have a physical carrier, this is very simple
2482 */
2483static void
2484xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2485{
2486 ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
2487 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
2488}
2489
2490
2491/*---------------------------- NewBus Registration ---------------------------*/
2492static device_method_t xnb_methods[] = {
2493 /* Device interface */
2494 DEVMETHOD(device_probe, xnb_probe),
2495 DEVMETHOD(device_attach, xnb_attach),
2496 DEVMETHOD(device_detach, xnb_detach),
2497 DEVMETHOD(device_shutdown, bus_generic_shutdown),
2498 DEVMETHOD(device_suspend, xnb_suspend),
2499 DEVMETHOD(device_resume, xnb_resume),
2500
2501 /* Xenbus interface */
2502 DEVMETHOD(xenbus_otherend_changed, xnb_frontend_changed),
2503
2504 { 0, 0 }
2505};
2506
2507static driver_t xnb_driver = {
2508 "xnb",
2509 xnb_methods,
2510 sizeof(struct xnb_softc),
2511};
2512devclass_t xnb_devclass;
2513
2514DRIVER_MODULE(xnb, xenbusb_back, xnb_driver, xnb_devclass, 0, 0);
2515
2516
2517/*-------------------------- Unit Tests -------------------------------------*/
2518#ifdef XNB_DEBUG
2519#include "netback_unit_tests.c"
2520#endif
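/*
 * Usage note (not part of the driver): when built with XNB_DEBUG, the unit
 * tests included above are exposed through the "unit_test_results" sysctl
 * installed by xnb_setup_sysctl(). Reading the OID runs
 * xnb_unit_test_main() and returns its report as a string, e.g. (assuming
 * unit number 0):
 *
 *	sysctl dev.xnb.0.unit_test_results
 */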
| 134#define virt_to_offset(x) ((x) & (PAGE_SIZE - 1)) 135 136/** 137 * Predefined array type of grant table copy descriptors. Used to pass around 138 * statically allocated memory structures. 139 */ 140typedef struct gnttab_copy gnttab_copy_table[GNTTAB_LEN]; 141 142/*--------------------------- Forward Declarations ---------------------------*/ 143struct xnb_softc; 144struct xnb_pkt; 145 146static void xnb_attach_failed(struct xnb_softc *xnb, 147 int err, const char *fmt, ...) 148 __printflike(3,4); 149static int xnb_shutdown(struct xnb_softc *xnb); 150static int create_netdev(device_t dev); 151static int xnb_detach(device_t dev); 152static int xnb_ifmedia_upd(struct ifnet *ifp); 153static void xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); 154static void xnb_intr(void *arg); 155static int xnb_send(netif_rx_back_ring_t *rxb, domid_t otherend, 156 const struct mbuf *mbufc, gnttab_copy_table gnttab); 157static int xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, 158 struct mbuf **mbufc, struct ifnet *ifnet, 159 gnttab_copy_table gnttab); 160static int xnb_ring2pkt(struct xnb_pkt *pkt, 161 const netif_tx_back_ring_t *tx_ring, 162 RING_IDX start); 163static void xnb_txpkt2rsp(const struct xnb_pkt *pkt, 164 netif_tx_back_ring_t *ring, int error); 165static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp); 166static int xnb_txpkt2gnttab(const struct xnb_pkt *pkt, 167 const struct mbuf *mbufc, 168 gnttab_copy_table gnttab, 169 const netif_tx_back_ring_t *txb, 170 domid_t otherend_id); 171static void xnb_update_mbufc(struct mbuf *mbufc, 172 const gnttab_copy_table gnttab, int n_entries); 173static int xnb_mbufc2pkt(const struct mbuf *mbufc, 174 struct xnb_pkt *pkt, 175 RING_IDX start, int space); 176static int xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, 177 const struct mbuf *mbufc, 178 gnttab_copy_table gnttab, 179 const netif_rx_back_ring_t *rxb, 180 domid_t otherend_id); 181static int xnb_rxpkt2rsp(const struct xnb_pkt *pkt, 182 const gnttab_copy_table gnttab, int n_entries, 183 netif_rx_back_ring_t *ring); 184static void xnb_stop(struct xnb_softc*); 185static int xnb_ioctl(struct ifnet*, u_long, caddr_t); 186static void xnb_start_locked(struct ifnet*); 187static void xnb_start(struct ifnet*); 188static void xnb_ifinit_locked(struct xnb_softc*); 189static void xnb_ifinit(void*); 190#ifdef XNB_DEBUG 191static int xnb_unit_test_main(SYSCTL_HANDLER_ARGS); 192static int xnb_dump_rings(SYSCTL_HANDLER_ARGS); 193#endif 194#if defined(INET) || defined(INET6) 195static void xnb_add_mbuf_cksum(struct mbuf *mbufc); 196#endif 197/*------------------------------ Data Structures -----------------------------*/ 198 199 200/** 201 * Representation of a xennet packet. Simplified version of a packet as 202 * stored in the Xen tx ring. Applicable to both RX and TX packets 203 */ 204struct xnb_pkt{ 205 /** 206 * Array index of the first data-bearing (eg, not extra info) entry 207 * for this packet 208 */ 209 RING_IDX car; 210 211 /** 212 * Array index of the second data-bearing entry for this packet. 213 * Invalid if the packet has only one data-bearing entry. If the 214 * packet has more than two data-bearing entries, then the second 215 * through the last will be sequential modulo the ring size 216 */ 217 RING_IDX cdr; 218 219 /** 220 * Optional extra info. Only valid if flags contains 221 * NETTXF_extra_info. Note that extra.type will always be 222 * XEN_NETIF_EXTRA_TYPE_GSO. 
Currently, no known netfront or netback 223 * driver will ever set XEN_NETIF_EXTRA_TYPE_MCAST_* 224 */ 225 netif_extra_info_t extra; 226 227 /** Size of entire packet in bytes. */ 228 uint16_t size; 229 230 /** The size of the first entry's data in bytes */ 231 uint16_t car_size; 232 233 /** 234 * Either NETTXF_ or NETRXF_ flags. Note that the flag values are 235 * not the same for TX and RX packets 236 */ 237 uint16_t flags; 238 239 /** 240 * The number of valid data-bearing entries (either netif_tx_request's 241 * or netif_rx_response's) in the packet. If this is 0, it means the 242 * entire packet is invalid. 243 */ 244 uint16_t list_len; 245 246 /** There was an error processing the packet */ 247 uint8_t error; 248}; 249 250/** xnb_pkt method: initialize it */ 251static inline void 252xnb_pkt_initialize(struct xnb_pkt *pxnb) 253{ 254 bzero(pxnb, sizeof(*pxnb)); 255} 256 257/** xnb_pkt method: mark the packet as valid */ 258static inline void 259xnb_pkt_validate(struct xnb_pkt *pxnb) 260{ 261 pxnb->error = 0; 262}; 263 264/** xnb_pkt method: mark the packet as invalid */ 265static inline void 266xnb_pkt_invalidate(struct xnb_pkt *pxnb) 267{ 268 pxnb->error = 1; 269}; 270 271/** xnb_pkt method: Check whether the packet is valid */ 272static inline int 273xnb_pkt_is_valid(const struct xnb_pkt *pxnb) 274{ 275 return (! pxnb->error); 276} 277 278#ifdef XNB_DEBUG 279/** xnb_pkt method: print the packet's contents in human-readable format*/ 280static void __unused 281xnb_dump_pkt(const struct xnb_pkt *pkt) { 282 if (pkt == NULL) { 283 DPRINTF("Was passed a null pointer.\n"); 284 return; 285 } 286 DPRINTF("pkt address= %p\n", pkt); 287 DPRINTF("pkt->size=%d\n", pkt->size); 288 DPRINTF("pkt->car_size=%d\n", pkt->car_size); 289 DPRINTF("pkt->flags=0x%04x\n", pkt->flags); 290 DPRINTF("pkt->list_len=%d\n", pkt->list_len); 291 /* DPRINTF("pkt->extra"); TODO */ 292 DPRINTF("pkt->car=%d\n", pkt->car); 293 DPRINTF("pkt->cdr=%d\n", pkt->cdr); 294 DPRINTF("pkt->error=%d\n", pkt->error); 295} 296#endif /* XNB_DEBUG */ 297 298static void 299xnb_dump_txreq(RING_IDX idx, const struct netif_tx_request *txreq) 300{ 301 if (txreq != NULL) { 302 DPRINTF("netif_tx_request index =%u\n", idx); 303 DPRINTF("netif_tx_request.gref =%u\n", txreq->gref); 304 DPRINTF("netif_tx_request.offset=%hu\n", txreq->offset); 305 DPRINTF("netif_tx_request.flags =%hu\n", txreq->flags); 306 DPRINTF("netif_tx_request.id =%hu\n", txreq->id); 307 DPRINTF("netif_tx_request.size =%hu\n", txreq->size); 308 } 309} 310 311 312/** 313 * \brief Configuration data for a shared memory request ring 314 * used to communicate with the front-end client of this 315 * this driver. 316 */ 317struct xnb_ring_config { 318 /** 319 * Runtime structures for ring access. Unfortunately, TX and RX rings 320 * use different data structures, and that cannot be changed since it 321 * is part of the interdomain protocol. 322 */ 323 union{ 324 netif_rx_back_ring_t rx_ring; 325 netif_tx_back_ring_t tx_ring; 326 } back_ring; 327 328 /** 329 * The device bus address returned by the hypervisor when 330 * mapping the ring and required to unmap it when a connection 331 * is torn down. 332 */ 333 uint64_t bus_addr; 334 335 /** The pseudo-physical address where ring memory is mapped.*/ 336 uint64_t gnt_addr; 337 338 /** KVA address where ring memory is mapped. */ 339 vm_offset_t va; 340 341 /** 342 * Grant table handles, one per-ring page, returned by the 343 * hyperpervisor upon mapping of the ring and required to 344 * unmap it when a connection is torn down. 
345 */ 346 grant_handle_t handle; 347 348 /** The number of ring pages mapped for the current connection. */ 349 unsigned ring_pages; 350 351 /** 352 * The grant references, one per-ring page, supplied by the 353 * front-end, allowing us to reference the ring pages in the 354 * front-end's domain and to map these pages into our own domain. 355 */ 356 grant_ref_t ring_ref; 357}; 358 359/** 360 * Per-instance connection state flags. 361 */ 362typedef enum 363{ 364 /** Communication with the front-end has been established. */ 365 XNBF_RING_CONNECTED = 0x01, 366 367 /** 368 * Front-end requests exist in the ring and are waiting for 369 * xnb_xen_req objects to free up. 370 */ 371 XNBF_RESOURCE_SHORTAGE = 0x02, 372 373 /** Connection teardown has started. */ 374 XNBF_SHUTDOWN = 0x04, 375 376 /** A thread is already performing shutdown processing. */ 377 XNBF_IN_SHUTDOWN = 0x08 378} xnb_flag_t; 379 380/** 381 * Types of rings. Used for array indices and to identify a ring's control 382 * data structure type 383 */ 384typedef enum{ 385 XNB_RING_TYPE_TX = 0, /* ID of TX rings, used for array indices */ 386 XNB_RING_TYPE_RX = 1, /* ID of RX rings, used for array indices */ 387 XNB_NUM_RING_TYPES 388} xnb_ring_type_t; 389 390/** 391 * Per-instance configuration data. 392 */ 393struct xnb_softc { 394 /** NewBus device corresponding to this instance. */ 395 device_t dev; 396 397 /* Media related fields */ 398 399 /** Generic network media state */ 400 struct ifmedia sc_media; 401 402 /** Media carrier info */ 403 struct ifnet *xnb_ifp; 404 405 /** Our own private carrier state */ 406 unsigned carrier; 407 408 /** Device MAC Address */ 409 uint8_t mac[ETHER_ADDR_LEN]; 410 411 /* Xen related fields */ 412 413 /** 414 * \brief The netif protocol abi in effect. 415 * 416 * There are situations where the back and front ends can 417 * have a different, native abi (e.g. intel x86_64 and 418 * 32bit x86 domains on the same machine). The back-end 419 * always accomodates the front-end's native abi. That 420 * value is pulled from the XenStore and recorded here. 421 */ 422 int abi; 423 424 /** 425 * Name of the bridge to which this VIF is connected, if any 426 * This field is dynamically allocated by xenbus and must be free()ed 427 * when no longer needed 428 */ 429 char *bridge; 430 431 /** The interrupt driven even channel used to signal ring events. */ 432 evtchn_port_t evtchn; 433 434 /** Xen device handle.*/ 435 long handle; 436 437 /** Handle to the communication ring event channel. */ 438 xen_intr_handle_t xen_intr_handle; 439 440 /** 441 * \brief Cached value of the front-end's domain id. 442 * 443 * This value is used at once for each mapped page in 444 * a transaction. We cache it to avoid incuring the 445 * cost of an ivar access every time this is needed. 446 */ 447 domid_t otherend_id; 448 449 /** 450 * Undocumented frontend feature. Has something to do with 451 * scatter/gather IO 452 */ 453 uint8_t can_sg; 454 /** Undocumented frontend feature */ 455 uint8_t gso; 456 /** Undocumented frontend feature */ 457 uint8_t gso_prefix; 458 /** Can checksum TCP/UDP over IPv4 */ 459 uint8_t ip_csum; 460 461 /* Implementation related fields */ 462 /** 463 * Preallocated grant table copy descriptor for RX operations. 464 * Access must be protected by rx_lock 465 */ 466 gnttab_copy_table rx_gnttab; 467 468 /** 469 * Preallocated grant table copy descriptor for TX operations. 
470 * Access must be protected by tx_lock 471 */ 472 gnttab_copy_table tx_gnttab; 473 474 /** 475 * Resource representing allocated physical address space 476 * associated with our per-instance kva region. 477 */ 478 struct resource *pseudo_phys_res; 479 480 /** Resource id for allocated physical address space. */ 481 int pseudo_phys_res_id; 482 483 /** Ring mapping and interrupt configuration data. */ 484 struct xnb_ring_config ring_configs[XNB_NUM_RING_TYPES]; 485 486 /** 487 * Global pool of kva used for mapping remote domain ring 488 * and I/O transaction data. 489 */ 490 vm_offset_t kva; 491 492 /** Psuedo-physical address corresponding to kva. */ 493 uint64_t gnt_base_addr; 494 495 /** Various configuration and state bit flags. */ 496 xnb_flag_t flags; 497 498 /** Mutex protecting per-instance data in the receive path. */ 499 struct mtx rx_lock; 500 501 /** Mutex protecting per-instance data in the softc structure. */ 502 struct mtx sc_lock; 503 504 /** Mutex protecting per-instance data in the transmit path. */ 505 struct mtx tx_lock; 506 507 /** The size of the global kva pool. */ 508 int kva_size; 509 510 /** Name of the interface */ 511 char if_name[IFNAMSIZ]; 512}; 513 514/*---------------------------- Debugging functions ---------------------------*/ 515#ifdef XNB_DEBUG 516static void __unused 517xnb_dump_gnttab_copy(const struct gnttab_copy *entry) 518{ 519 if (entry == NULL) { 520 printf("NULL grant table pointer\n"); 521 return; 522 } 523 524 if (entry->flags & GNTCOPY_dest_gref) 525 printf("gnttab dest ref=\t%u\n", entry->dest.u.ref); 526 else 527 printf("gnttab dest gmfn=\t%lu\n", entry->dest.u.gmfn); 528 printf("gnttab dest offset=\t%hu\n", entry->dest.offset); 529 printf("gnttab dest domid=\t%hu\n", entry->dest.domid); 530 if (entry->flags & GNTCOPY_source_gref) 531 printf("gnttab source ref=\t%u\n", entry->source.u.ref); 532 else 533 printf("gnttab source gmfn=\t%lu\n", entry->source.u.gmfn); 534 printf("gnttab source offset=\t%hu\n", entry->source.offset); 535 printf("gnttab source domid=\t%hu\n", entry->source.domid); 536 printf("gnttab len=\t%hu\n", entry->len); 537 printf("gnttab flags=\t%hu\n", entry->flags); 538 printf("gnttab status=\t%hd\n", entry->status); 539} 540 541static int 542xnb_dump_rings(SYSCTL_HANDLER_ARGS) 543{ 544 static char results[720]; 545 struct xnb_softc const* xnb = (struct xnb_softc*)arg1; 546 netif_rx_back_ring_t const* rxb = 547 &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring; 548 netif_tx_back_ring_t const* txb = 549 &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring; 550 551 /* empty the result strings */ 552 results[0] = 0; 553 554 if ( !txb || !txb->sring || !rxb || !rxb->sring ) 555 return (SYSCTL_OUT(req, results, strnlen(results, 720))); 556 557 snprintf(results, 720, 558 "\n\t%35s %18s\n" /* TX, RX */ 559 "\t%16s %18d %18d\n" /* req_cons */ 560 "\t%16s %18d %18d\n" /* nr_ents */ 561 "\t%16s %18d %18d\n" /* rsp_prod_pvt */ 562 "\t%16s %18p %18p\n" /* sring */ 563 "\t%16s %18d %18d\n" /* req_prod */ 564 "\t%16s %18d %18d\n" /* req_event */ 565 "\t%16s %18d %18d\n" /* rsp_prod */ 566 "\t%16s %18d %18d\n", /* rsp_event */ 567 "TX", "RX", 568 "req_cons", txb->req_cons, rxb->req_cons, 569 "nr_ents", txb->nr_ents, rxb->nr_ents, 570 "rsp_prod_pvt", txb->rsp_prod_pvt, rxb->rsp_prod_pvt, 571 "sring", txb->sring, rxb->sring, 572 "sring->req_prod", txb->sring->req_prod, rxb->sring->req_prod, 573 "sring->req_event", txb->sring->req_event, rxb->sring->req_event, 574 "sring->rsp_prod", txb->sring->rsp_prod, rxb->sring->rsp_prod, 575 
"sring->rsp_event", txb->sring->rsp_event, rxb->sring->rsp_event); 576 577 return (SYSCTL_OUT(req, results, strnlen(results, 720))); 578} 579 580static void __unused 581xnb_dump_mbuf(const struct mbuf *m) 582{ 583 int len; 584 uint8_t *d; 585 if (m == NULL) 586 return; 587 588 printf("xnb_dump_mbuf:\n"); 589 if (m->m_flags & M_PKTHDR) { 590 printf(" flowid=%10d, csum_flags=%#8x, csum_data=%#8x, " 591 "tso_segsz=%5hd\n", 592 m->m_pkthdr.flowid, (int)m->m_pkthdr.csum_flags, 593 m->m_pkthdr.csum_data, m->m_pkthdr.tso_segsz); 594 printf(" rcvif=%16p, len=%19d\n", 595 m->m_pkthdr.rcvif, m->m_pkthdr.len); 596 } 597 printf(" m_next=%16p, m_nextpk=%16p, m_data=%16p\n", 598 m->m_next, m->m_nextpkt, m->m_data); 599 printf(" m_len=%17d, m_flags=%#15x, m_type=%18u\n", 600 m->m_len, m->m_flags, m->m_type); 601 602 len = m->m_len; 603 d = mtod(m, uint8_t*); 604 while (len > 0) { 605 int i; 606 printf(" "); 607 for (i = 0; (i < 16) && (len > 0); i++, len--) { 608 printf("%02hhx ", *(d++)); 609 } 610 printf("\n"); 611 } 612} 613#endif /* XNB_DEBUG */ 614 615/*------------------------ Inter-Domain Communication ------------------------*/ 616/** 617 * Free dynamically allocated KVA or pseudo-physical address allocations. 618 * 619 * \param xnb Per-instance xnb configuration structure. 620 */ 621static void 622xnb_free_communication_mem(struct xnb_softc *xnb) 623{ 624 if (xnb->kva != 0) { 625 if (xnb->pseudo_phys_res != NULL) { 626 xenmem_free(xnb->dev, xnb->pseudo_phys_res_id, 627 xnb->pseudo_phys_res); 628 xnb->pseudo_phys_res = NULL; 629 } 630 } 631 xnb->kva = 0; 632 xnb->gnt_base_addr = 0; 633} 634 635/** 636 * Cleanup all inter-domain communication mechanisms. 637 * 638 * \param xnb Per-instance xnb configuration structure. 639 */ 640static int 641xnb_disconnect(struct xnb_softc *xnb) 642{ 643 struct gnttab_unmap_grant_ref gnts[XNB_NUM_RING_TYPES]; 644 int error; 645 int i; 646 647 if (xnb->xen_intr_handle != NULL) 648 xen_intr_unbind(&xnb->xen_intr_handle); 649 650 /* 651 * We may still have another thread currently processing requests. We 652 * must acquire the rx and tx locks to make sure those threads are done, 653 * but we can release those locks as soon as we acquire them, because no 654 * more interrupts will be arriving. 655 */ 656 mtx_lock(&xnb->tx_lock); 657 mtx_unlock(&xnb->tx_lock); 658 mtx_lock(&xnb->rx_lock); 659 mtx_unlock(&xnb->rx_lock); 660 661 /* Free malloc'd softc member variables */ 662 if (xnb->bridge != NULL) { 663 free(xnb->bridge, M_XENSTORE); 664 xnb->bridge = NULL; 665 } 666 667 /* All request processing has stopped, so unmap the rings */ 668 for (i=0; i < XNB_NUM_RING_TYPES; i++) { 669 gnts[i].host_addr = xnb->ring_configs[i].gnt_addr; 670 gnts[i].dev_bus_addr = xnb->ring_configs[i].bus_addr; 671 gnts[i].handle = xnb->ring_configs[i].handle; 672 } 673 error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, gnts, 674 XNB_NUM_RING_TYPES); 675 KASSERT(error == 0, ("Grant table unmap op failed (%d)", error)); 676 677 xnb_free_communication_mem(xnb); 678 /* 679 * Zero the ring config structs because the pointers, handles, and 680 * grant refs contained therein are no longer valid. 
681 */ 682 bzero(&xnb->ring_configs[XNB_RING_TYPE_TX], 683 sizeof(struct xnb_ring_config)); 684 bzero(&xnb->ring_configs[XNB_RING_TYPE_RX], 685 sizeof(struct xnb_ring_config)); 686 687 xnb->flags &= ~XNBF_RING_CONNECTED; 688 return (0); 689} 690 691/** 692 * Map a single shared memory ring into domain local address space and 693 * initialize its control structure 694 * 695 * \param xnb Per-instance xnb configuration structure 696 * \param ring_type Array index of this ring in the xnb's array of rings 697 * \return An errno 698 */ 699static int 700xnb_connect_ring(struct xnb_softc *xnb, xnb_ring_type_t ring_type) 701{ 702 struct gnttab_map_grant_ref gnt; 703 struct xnb_ring_config *ring = &xnb->ring_configs[ring_type]; 704 int error; 705 706 /* TX ring type = 0, RX =1 */ 707 ring->va = xnb->kva + ring_type * PAGE_SIZE; 708 ring->gnt_addr = xnb->gnt_base_addr + ring_type * PAGE_SIZE; 709 710 gnt.host_addr = ring->gnt_addr; 711 gnt.flags = GNTMAP_host_map; 712 gnt.ref = ring->ring_ref; 713 gnt.dom = xnb->otherend_id; 714 715 error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &gnt, 1); 716 if (error != 0) 717 panic("netback: Ring page grant table op failed (%d)", error); 718 719 if (gnt.status != 0) { 720 ring->va = 0; 721 error = EACCES; 722 xenbus_dev_fatal(xnb->dev, error, 723 "Ring shared page mapping failed. " 724 "Status %d.", gnt.status); 725 } else { 726 ring->handle = gnt.handle; 727 ring->bus_addr = gnt.dev_bus_addr; 728 729 if (ring_type == XNB_RING_TYPE_TX) { 730 BACK_RING_INIT(&ring->back_ring.tx_ring, 731 (netif_tx_sring_t*)ring->va, 732 ring->ring_pages * PAGE_SIZE); 733 } else if (ring_type == XNB_RING_TYPE_RX) { 734 BACK_RING_INIT(&ring->back_ring.rx_ring, 735 (netif_rx_sring_t*)ring->va, 736 ring->ring_pages * PAGE_SIZE); 737 } else { 738 xenbus_dev_fatal(xnb->dev, error, 739 "Unknown ring type %d", ring_type); 740 } 741 } 742 743 return error; 744} 745 746/** 747 * Setup the shared memory rings and bind an interrupt to the event channel 748 * used to notify us of ring changes. 749 * 750 * \param xnb Per-instance xnb configuration structure. 751 */ 752static int 753xnb_connect_comms(struct xnb_softc *xnb) 754{ 755 int error; 756 xnb_ring_type_t i; 757 758 if ((xnb->flags & XNBF_RING_CONNECTED) != 0) 759 return (0); 760 761 /* 762 * Kva for our rings are at the tail of the region of kva allocated 763 * by xnb_alloc_communication_mem(). 764 */ 765 for (i=0; i < XNB_NUM_RING_TYPES; i++) { 766 error = xnb_connect_ring(xnb, i); 767 if (error != 0) 768 return error; 769 } 770 771 xnb->flags |= XNBF_RING_CONNECTED; 772 773 error = xen_intr_bind_remote_port(xnb->dev, 774 xnb->otherend_id, 775 xnb->evtchn, 776 /*filter*/NULL, 777 xnb_intr, /*arg*/xnb, 778 INTR_TYPE_BIO | INTR_MPSAFE, 779 &xnb->xen_intr_handle); 780 if (error != 0) { 781 (void)xnb_disconnect(xnb); 782 xenbus_dev_fatal(xnb->dev, error, "binding event channel"); 783 return (error); 784 } 785 786 DPRINTF("rings connected!\n"); 787 788 return (0); 789} 790 791/** 792 * Size KVA and pseudo-physical address allocations based on negotiated 793 * values for the size and number of I/O requests, and the size of our 794 * communication ring. 795 * 796 * \param xnb Per-instance xnb configuration structure. 797 * 798 * These address spaces are used to dynamically map pages in the 799 * front-end's domain into our own. 
800 */ 801static int 802xnb_alloc_communication_mem(struct xnb_softc *xnb) 803{ 804 xnb_ring_type_t i; 805 806 xnb->kva_size = 0; 807 for (i=0; i < XNB_NUM_RING_TYPES; i++) { 808 xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE; 809 } 810 811 /* 812 * Reserve a range of pseudo physical memory that we can map 813 * into kva. These pages will only be backed by machine 814 * pages ("real memory") during the lifetime of front-end requests 815 * via grant table operations. We will map the netif tx and rx rings 816 * into this space. 817 */ 818 xnb->pseudo_phys_res_id = 0; 819 xnb->pseudo_phys_res = xenmem_alloc(xnb->dev, &xnb->pseudo_phys_res_id, 820 xnb->kva_size); 821 if (xnb->pseudo_phys_res == NULL) { 822 xnb->kva = 0; 823 return (ENOMEM); 824 } 825 xnb->kva = (vm_offset_t)rman_get_virtual(xnb->pseudo_phys_res); 826 xnb->gnt_base_addr = rman_get_start(xnb->pseudo_phys_res); 827 return (0); 828} 829 830/** 831 * Collect information from the XenStore related to our device and its frontend 832 * 833 * \param xnb Per-instance xnb configuration structure. 834 */ 835static int 836xnb_collect_xenstore_info(struct xnb_softc *xnb) 837{ 838 /** 839 * \todo Linux collects the following info. We should collect most 840 * of this, too: 841 * "feature-rx-notify" 842 */ 843 const char *otherend_path; 844 const char *our_path; 845 int err; 846 unsigned int rx_copy, bridge_len; 847 uint8_t no_csum_offload; 848 849 otherend_path = xenbus_get_otherend_path(xnb->dev); 850 our_path = xenbus_get_node(xnb->dev); 851 852 /* Collect the critical communication parameters */ 853 err = xs_gather(XST_NIL, otherend_path, 854 "tx-ring-ref", "%l" PRIu32, 855 &xnb->ring_configs[XNB_RING_TYPE_TX].ring_ref, 856 "rx-ring-ref", "%l" PRIu32, 857 &xnb->ring_configs[XNB_RING_TYPE_RX].ring_ref, 858 "event-channel", "%" PRIu32, &xnb->evtchn, 859 NULL); 860 if (err != 0) { 861 xenbus_dev_fatal(xnb->dev, err, 862 "Unable to retrieve ring information from " 863 "frontend %s. Unable to connect.", 864 otherend_path); 865 return (err); 866 } 867 868 /* Collect the handle from xenstore */ 869 err = xs_scanf(XST_NIL, our_path, "handle", NULL, "%li", &xnb->handle); 870 if (err != 0) { 871 xenbus_dev_fatal(xnb->dev, err, 872 "Error reading handle from frontend %s. " 873 "Unable to connect.", otherend_path); 874 } 875 876 /* 877 * Collect the bridgename, if any. We do not need bridge_len; we just 878 * throw it away 879 */ 880 err = xs_read(XST_NIL, our_path, "bridge", &bridge_len, 881 (void**)&xnb->bridge); 882 if (err != 0) 883 xnb->bridge = NULL; 884 885 /* 886 * Does the frontend request that we use rx copy? If not, return an 887 * error because this driver only supports rx copy. 888 */ 889 err = xs_scanf(XST_NIL, otherend_path, "request-rx-copy", NULL, 890 "%" PRIu32, &rx_copy); 891 if (err == ENOENT) { 892 err = 0; 893 rx_copy = 0; 894 } 895 if (err < 0) { 896 xenbus_dev_fatal(xnb->dev, err, "reading %s/request-rx-copy", 897 otherend_path); 898 return err; 899 } 900 /** 901 * \todo: figure out the exact meaning of this feature, and when 902 * the frontend will set it to true. It should be set to true 903 * at some point 904 */ 905/* if (!rx_copy)*/ 906/* return EOPNOTSUPP;*/ 907 908 /** \todo Collect the rx notify feature */ 909 910 /* Collect the feature-sg. 
*/ 911 if (xs_scanf(XST_NIL, otherend_path, "feature-sg", NULL, 912 "%hhu", &xnb->can_sg) < 0) 913 xnb->can_sg = 0; 914 915 /* Collect remaining frontend features */ 916 if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4", NULL, 917 "%hhu", &xnb->gso) < 0) 918 xnb->gso = 0; 919 920 if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4-prefix", NULL, 921 "%hhu", &xnb->gso_prefix) < 0) 922 xnb->gso_prefix = 0; 923 924 if (xs_scanf(XST_NIL, otherend_path, "feature-no-csum-offload", NULL, 925 "%hhu", &no_csum_offload) < 0) 926 no_csum_offload = 0; 927 xnb->ip_csum = (no_csum_offload == 0); 928 929 return (0); 930} 931 932/** 933 * Supply information about the physical device to the frontend 934 * via XenBus. 935 * 936 * \param xnb Per-instance xnb configuration structure. 937 */ 938static int 939xnb_publish_backend_info(struct xnb_softc *xnb) 940{ 941 struct xs_transaction xst; 942 const char *our_path; 943 int error; 944 945 our_path = xenbus_get_node(xnb->dev); 946 947 do { 948 error = xs_transaction_start(&xst); 949 if (error != 0) { 950 xenbus_dev_fatal(xnb->dev, error, 951 "Error publishing backend info " 952 "(start transaction)"); 953 break; 954 } 955 956 error = xs_printf(xst, our_path, "feature-sg", 957 "%d", XNB_SG); 958 if (error != 0) 959 break; 960 961 error = xs_printf(xst, our_path, "feature-gso-tcpv4", 962 "%d", XNB_GSO_TCPV4); 963 if (error != 0) 964 break; 965 966 error = xs_printf(xst, our_path, "feature-rx-copy", 967 "%d", XNB_RX_COPY); 968 if (error != 0) 969 break; 970 971 error = xs_printf(xst, our_path, "feature-rx-flip", 972 "%d", XNB_RX_FLIP); 973 if (error != 0) 974 break; 975 976 error = xs_transaction_end(xst, 0); 977 if (error != 0 && error != EAGAIN) { 978 xenbus_dev_fatal(xnb->dev, error, "ending transaction"); 979 break; 980 } 981 982 } while (error == EAGAIN); 983 984 return (error); 985} 986 987/** 988 * Connect to our netfront peer now that it has completed publishing 989 * its configuration into the XenStore. 990 * 991 * \param xnb Per-instance xnb configuration structure. 992 */ 993static void 994xnb_connect(struct xnb_softc *xnb) 995{ 996 int error; 997 998 if (xenbus_get_state(xnb->dev) == XenbusStateConnected) 999 return; 1000 1001 if (xnb_collect_xenstore_info(xnb) != 0) 1002 return; 1003 1004 xnb->flags &= ~XNBF_SHUTDOWN; 1005 1006 /* Read front end configuration. */ 1007 1008 /* Allocate resources whose size depends on front-end configuration. */ 1009 error = xnb_alloc_communication_mem(xnb); 1010 if (error != 0) { 1011 xenbus_dev_fatal(xnb->dev, error, 1012 "Unable to allocate communication memory"); 1013 return; 1014 } 1015 1016 /* 1017 * Connect communication channel. 1018 */ 1019 error = xnb_connect_comms(xnb); 1020 if (error != 0) { 1021 /* Specific errors are reported by xnb_connect_comms(). */ 1022 return; 1023 } 1024 xnb->carrier = 1; 1025 1026 /* Ready for I/O. */ 1027 xenbus_set_state(xnb->dev, XenbusStateConnected); 1028} 1029 1030/*-------------------------- Device Teardown Support -------------------------*/ 1031/** 1032 * Perform device shutdown functions. 1033 * 1034 * \param xnb Per-instance xnb configuration structure. 1035 * 1036 * Mark this instance as shutting down, wait for any active requests 1037 * to drain, disconnect from the front-end, and notify any waiters (e.g. 1038 * a thread invoking our detach method) that detach can now proceed. 
1039 */ 1040static int 1041xnb_shutdown(struct xnb_softc *xnb) 1042{ 1043 /* 1044 * Due to the need to drop our mutex during some 1045 * xenbus operations, it is possible for two threads 1046 * to attempt to close out shutdown processing at 1047 * the same time. Tell the caller that hits this 1048 * race to try back later. 1049 */ 1050 if ((xnb->flags & XNBF_IN_SHUTDOWN) != 0) 1051 return (EAGAIN); 1052 1053 xnb->flags |= XNBF_SHUTDOWN; 1054 1055 xnb->flags |= XNBF_IN_SHUTDOWN; 1056 1057 mtx_unlock(&xnb->sc_lock); 1058 /* Free the network interface */ 1059 xnb->carrier = 0; 1060 if (xnb->xnb_ifp != NULL) { 1061 ether_ifdetach(xnb->xnb_ifp); 1062 if_free(xnb->xnb_ifp); 1063 xnb->xnb_ifp = NULL; 1064 } 1065 mtx_lock(&xnb->sc_lock); 1066 1067 xnb_disconnect(xnb); 1068 1069 mtx_unlock(&xnb->sc_lock); 1070 if (xenbus_get_state(xnb->dev) < XenbusStateClosing) 1071 xenbus_set_state(xnb->dev, XenbusStateClosing); 1072 mtx_lock(&xnb->sc_lock); 1073 1074 xnb->flags &= ~XNBF_IN_SHUTDOWN; 1075 1076 1077 /* Indicate to xnb_detach() that is it safe to proceed. */ 1078 wakeup(xnb); 1079 1080 return (0); 1081} 1082 1083/** 1084 * Report an attach time error to the console and Xen, and cleanup 1085 * this instance by forcing immediate detach processing. 1086 * 1087 * \param xnb Per-instance xnb configuration structure. 1088 * \param err Errno describing the error. 1089 * \param fmt Printf style format and arguments 1090 */ 1091static void 1092xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...) 1093{ 1094 va_list ap; 1095 va_list ap_hotplug; 1096 1097 va_start(ap, fmt); 1098 va_copy(ap_hotplug, ap); 1099 xs_vprintf(XST_NIL, xenbus_get_node(xnb->dev), 1100 "hotplug-error", fmt, ap_hotplug); 1101 va_end(ap_hotplug); 1102 xs_printf(XST_NIL, xenbus_get_node(xnb->dev), 1103 "hotplug-status", "error"); 1104 1105 xenbus_dev_vfatal(xnb->dev, err, fmt, ap); 1106 va_end(ap); 1107 1108 xs_printf(XST_NIL, xenbus_get_node(xnb->dev), 1109 "online", "0"); 1110 xnb_detach(xnb->dev); 1111} 1112 1113/*---------------------------- NewBus Entrypoints ----------------------------*/ 1114/** 1115 * Inspect a XenBus device and claim it if is of the appropriate type. 1116 * 1117 * \param dev NewBus device object representing a candidate XenBus device. 1118 * 1119 * \return 0 for success, errno codes for failure. 1120 */ 1121static int 1122xnb_probe(device_t dev) 1123{ 1124 if (!strcmp(xenbus_get_type(dev), "vif")) { 1125 DPRINTF("Claiming device %d, %s\n", device_get_unit(dev), 1126 devclass_get_name(device_get_devclass(dev))); 1127 device_set_desc(dev, "Backend Virtual Network Device"); 1128 device_quiet(dev); 1129 return (0); 1130 } 1131 return (ENXIO); 1132} 1133 1134/** 1135 * Setup sysctl variables to control various Network Back parameters. 1136 * 1137 * \param xnb Xen Net Back softc. 
1138 * 1139 */ 1140static void 1141xnb_setup_sysctl(struct xnb_softc *xnb) 1142{ 1143 struct sysctl_ctx_list *sysctl_ctx = NULL; 1144 struct sysctl_oid *sysctl_tree = NULL; 1145 1146 sysctl_ctx = device_get_sysctl_ctx(xnb->dev); 1147 if (sysctl_ctx == NULL) 1148 return; 1149 1150 sysctl_tree = device_get_sysctl_tree(xnb->dev); 1151 if (sysctl_tree == NULL) 1152 return; 1153 1154#ifdef XNB_DEBUG 1155 SYSCTL_ADD_PROC(sysctl_ctx, 1156 SYSCTL_CHILDREN(sysctl_tree), 1157 OID_AUTO, 1158 "unit_test_results", 1159 CTLTYPE_STRING | CTLFLAG_RD, 1160 xnb, 1161 0, 1162 xnb_unit_test_main, 1163 "A", 1164 "Results of builtin unit tests"); 1165 1166 SYSCTL_ADD_PROC(sysctl_ctx, 1167 SYSCTL_CHILDREN(sysctl_tree), 1168 OID_AUTO, 1169 "dump_rings", 1170 CTLTYPE_STRING | CTLFLAG_RD, 1171 xnb, 1172 0, 1173 xnb_dump_rings, 1174 "A", 1175 "Xennet Back Rings"); 1176#endif /* XNB_DEBUG */ 1177} 1178 1179/** 1180 * Create a network device. 1181 * @param handle device handle 1182 */ 1183int 1184create_netdev(device_t dev) 1185{ 1186 struct ifnet *ifp; 1187 struct xnb_softc *xnb; 1188 int err = 0; 1189 uint32_t handle; 1190 1191 xnb = device_get_softc(dev); 1192 mtx_init(&xnb->sc_lock, "xnb_softc", "xen netback softc lock", MTX_DEF); 1193 mtx_init(&xnb->tx_lock, "xnb_tx", "xen netback tx lock", MTX_DEF); 1194 mtx_init(&xnb->rx_lock, "xnb_rx", "xen netback rx lock", MTX_DEF); 1195 1196 xnb->dev = dev; 1197 1198 ifmedia_init(&xnb->sc_media, 0, xnb_ifmedia_upd, xnb_ifmedia_sts); 1199 ifmedia_add(&xnb->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); 1200 ifmedia_set(&xnb->sc_media, IFM_ETHER|IFM_MANUAL); 1201 1202 /* 1203 * Set the MAC address to a dummy value (00:00:00:00:00), 1204 * if the MAC address of the host-facing interface is set 1205 * to the same as the guest-facing one (the value found in 1206 * xenstore), the bridge would stop delivering packets to 1207 * us because it would see that the destination address of 1208 * the packet is the same as the interface, and so the bridge 1209 * would expect the packet has already been delivered locally 1210 * (and just drop it). 1211 */ 1212 bzero(&xnb->mac[0], sizeof(xnb->mac)); 1213 1214 /* The interface will be named using the following nomenclature: 1215 * 1216 * xnb<domid>.<handle> 1217 * 1218 * Where handle is the oder of the interface referred to the guest. 1219 */ 1220 err = xs_scanf(XST_NIL, xenbus_get_node(xnb->dev), "handle", NULL, 1221 "%" PRIu32, &handle); 1222 if (err != 0) 1223 return (err); 1224 snprintf(xnb->if_name, IFNAMSIZ, "xnb%" PRIu16 ".%" PRIu32, 1225 xenbus_get_otherend_id(dev), handle); 1226 1227 if (err == 0) { 1228 /* Set up ifnet structure */ 1229 ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER); 1230 ifp->if_softc = xnb; 1231 if_initname(ifp, xnb->if_name, IF_DUNIT_NONE); 1232 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1233 ifp->if_ioctl = xnb_ioctl; 1234 ifp->if_output = ether_output; 1235 ifp->if_start = xnb_start; 1236#ifdef notyet 1237 ifp->if_watchdog = xnb_watchdog; 1238#endif 1239 ifp->if_init = xnb_ifinit; 1240 ifp->if_mtu = ETHERMTU; 1241 ifp->if_snd.ifq_maxlen = NET_RX_RING_SIZE - 1; 1242 1243 ifp->if_hwassist = XNB_CSUM_FEATURES; 1244 ifp->if_capabilities = IFCAP_HWCSUM; 1245 ifp->if_capenable = IFCAP_HWCSUM; 1246 1247 ether_ifattach(ifp, xnb->mac); 1248 xnb->carrier = 0; 1249 } 1250 1251 return err; 1252} 1253 1254/** 1255 * Attach to a XenBus device that has been claimed by our probe routine. 1256 * 1257 * \param dev NewBus device object representing this Xen Net Back instance. 
1258 * 1259 * \return 0 for success, errno codes for failure. 1260 */ 1261static int 1262xnb_attach(device_t dev) 1263{ 1264 struct xnb_softc *xnb; 1265 int error; 1266 xnb_ring_type_t i; 1267 1268 error = create_netdev(dev); 1269 if (error != 0) { 1270 xenbus_dev_fatal(dev, error, "creating netdev"); 1271 return (error); 1272 } 1273 1274 DPRINTF("Attaching to %s\n", xenbus_get_node(dev)); 1275 1276 /* 1277 * Basic initialization. 1278 * After this block it is safe to call xnb_detach() 1279 * to clean up any allocated data for this instance. 1280 */ 1281 xnb = device_get_softc(dev); 1282 xnb->otherend_id = xenbus_get_otherend_id(dev); 1283 for (i=0; i < XNB_NUM_RING_TYPES; i++) { 1284 xnb->ring_configs[i].ring_pages = 1; 1285 } 1286 1287 /* 1288 * Setup sysctl variables. 1289 */ 1290 xnb_setup_sysctl(xnb); 1291 1292 /* Update hot-plug status to satisfy xend. */ 1293 error = xs_printf(XST_NIL, xenbus_get_node(xnb->dev), 1294 "hotplug-status", "connected"); 1295 if (error != 0) { 1296 xnb_attach_failed(xnb, error, "writing %s/hotplug-status", 1297 xenbus_get_node(xnb->dev)); 1298 return (error); 1299 } 1300 1301 if ((error = xnb_publish_backend_info(xnb)) != 0) { 1302 /* 1303 * If we can't publish our data, we cannot participate 1304 * in this connection, and waiting for a front-end state 1305 * change will not help the situation. 1306 */ 1307 xnb_attach_failed(xnb, error, 1308 "Publishing backend status for %s", 1309 xenbus_get_node(xnb->dev)); 1310 return error; 1311 } 1312 1313 /* Tell the front end that we are ready to connect. */ 1314 xenbus_set_state(dev, XenbusStateInitWait); 1315 1316 return (0); 1317} 1318 1319/** 1320 * Detach from a net back device instance. 1321 * 1322 * \param dev NewBus device object representing this Xen Net Back instance. 1323 * 1324 * \return 0 for success, errno codes for failure. 1325 * 1326 * \note A net back device may be detached at any time in its life-cycle, 1327 * including part way through the attach process. For this reason, 1328 * initialization order and the intialization state checks in this 1329 * routine must be carefully coupled so that attach time failures 1330 * are gracefully handled. 1331 */ 1332static int 1333xnb_detach(device_t dev) 1334{ 1335 struct xnb_softc *xnb; 1336 1337 DPRINTF("\n"); 1338 1339 xnb = device_get_softc(dev); 1340 mtx_lock(&xnb->sc_lock); 1341 while (xnb_shutdown(xnb) == EAGAIN) { 1342 msleep(xnb, &xnb->sc_lock, /*wakeup prio unchanged*/0, 1343 "xnb_shutdown", 0); 1344 } 1345 mtx_unlock(&xnb->sc_lock); 1346 DPRINTF("\n"); 1347 1348 mtx_destroy(&xnb->tx_lock); 1349 mtx_destroy(&xnb->rx_lock); 1350 mtx_destroy(&xnb->sc_lock); 1351 return (0); 1352} 1353 1354/** 1355 * Prepare this net back device for suspension of this VM. 1356 * 1357 * \param dev NewBus device object representing this Xen net Back instance. 1358 * 1359 * \return 0 for success, errno codes for failure. 1360 */ 1361static int 1362xnb_suspend(device_t dev) 1363{ 1364 return (0); 1365} 1366 1367/** 1368 * Perform any processing required to recover from a suspended state. 1369 * 1370 * \param dev NewBus device object representing this Xen Net Back instance. 1371 * 1372 * \return 0 for success, errno codes for failure. 1373 */ 1374static int 1375xnb_resume(device_t dev) 1376{ 1377 return (0); 1378} 1379 1380/** 1381 * Handle state changes expressed via the XenStore by our front-end peer. 1382 * 1383 * \param dev NewBus device object representing this Xen 1384 * Net Back instance. 1385 * \param frontend_state The new state of the front-end. 
1386 * 1387 * \return 0 for success, errno codes for failure. 1388 */ 1389static void 1390xnb_frontend_changed(device_t dev, XenbusState frontend_state) 1391{ 1392 struct xnb_softc *xnb; 1393 1394 xnb = device_get_softc(dev); 1395 1396 DPRINTF("frontend_state=%s, xnb_state=%s\n", 1397 xenbus_strstate(frontend_state), 1398 xenbus_strstate(xenbus_get_state(xnb->dev))); 1399 1400 switch (frontend_state) { 1401 case XenbusStateInitialising: 1402 break; 1403 case XenbusStateInitialised: 1404 case XenbusStateConnected: 1405 xnb_connect(xnb); 1406 break; 1407 case XenbusStateClosing: 1408 case XenbusStateClosed: 1409 mtx_lock(&xnb->sc_lock); 1410 xnb_shutdown(xnb); 1411 mtx_unlock(&xnb->sc_lock); 1412 if (frontend_state == XenbusStateClosed) 1413 xenbus_set_state(xnb->dev, XenbusStateClosed); 1414 break; 1415 default: 1416 xenbus_dev_fatal(xnb->dev, EINVAL, "saw state %d at frontend", 1417 frontend_state); 1418 break; 1419 } 1420} 1421 1422 1423/*---------------------------- Request Processing ----------------------------*/ 1424/** 1425 * Interrupt handler bound to the shared ring's event channel. 1426 * Entry point for the xennet transmit path in netback 1427 * Transfers packets from the Xen ring to the host's generic networking stack 1428 * 1429 * \param arg Callback argument registerd during event channel 1430 * binding - the xnb_softc for this instance. 1431 */ 1432static void 1433xnb_intr(void *arg) 1434{ 1435 struct xnb_softc *xnb; 1436 struct ifnet *ifp; 1437 netif_tx_back_ring_t *txb; 1438 RING_IDX req_prod_local; 1439 1440 xnb = (struct xnb_softc *)arg; 1441 ifp = xnb->xnb_ifp; 1442 txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring; 1443 1444 mtx_lock(&xnb->tx_lock); 1445 do { 1446 int notify; 1447 req_prod_local = txb->sring->req_prod; 1448 xen_rmb(); 1449 1450 for (;;) { 1451 struct mbuf *mbufc; 1452 int err; 1453 1454 err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp, 1455 xnb->tx_gnttab); 1456 if (err || (mbufc == NULL)) 1457 break; 1458 1459 /* Send the packet to the generic network stack */ 1460 (*xnb->xnb_ifp->if_input)(xnb->xnb_ifp, mbufc); 1461 } 1462 1463 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(txb, notify); 1464 if (notify != 0) 1465 xen_intr_signal(xnb->xen_intr_handle); 1466 1467 txb->sring->req_event = txb->req_cons + 1; 1468 xen_mb(); 1469 } while (txb->sring->req_prod != req_prod_local) ; 1470 mtx_unlock(&xnb->tx_lock); 1471 1472 xnb_start(ifp); 1473} 1474 1475 1476/** 1477 * Build a struct xnb_pkt based on netif_tx_request's from a netif tx ring. 1478 * Will read exactly 0 or 1 packets from the ring; never a partial packet. 1479 * \param[out] pkt The returned packet. If there is an error building 1480 * the packet, pkt.list_len will be set to 0. 
1481 * \param[in] tx_ring Pointer to the Ring that is the input to this function 1482 * \param[in] start The ring index of the first potential request 1483 * \return The number of requests consumed to build this packet 1484 */ 1485static int 1486xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring, 1487 RING_IDX start) 1488{ 1489 /* 1490 * Outline: 1491 * 1) Initialize pkt 1492 * 2) Read the first request of the packet 1493 * 3) Read the extras 1494 * 4) Set cdr 1495 * 5) Loop on the remainder of the packet 1496 * 6) Finalize pkt (stuff like car_size and list_len) 1497 */ 1498 int idx = start; 1499 int discard = 0; /* whether to discard the packet */ 1500 int more_data = 0; /* there are more request past the last one */ 1501 uint16_t cdr_size = 0; /* accumulated size of requests 2 through n */ 1502 1503 xnb_pkt_initialize(pkt); 1504 1505 /* Read the first request */ 1506 if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) { 1507 netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx); 1508 pkt->size = tx->size; 1509 pkt->flags = tx->flags & ~NETTXF_more_data; 1510 more_data = tx->flags & NETTXF_more_data; 1511 pkt->list_len++; 1512 pkt->car = idx; 1513 idx++; 1514 } 1515 1516 /* Read the extra info */ 1517 if ((pkt->flags & NETTXF_extra_info) && 1518 RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) { 1519 netif_extra_info_t *ext = 1520 (netif_extra_info_t*) RING_GET_REQUEST(tx_ring, idx); 1521 pkt->extra.type = ext->type; 1522 switch (pkt->extra.type) { 1523 case XEN_NETIF_EXTRA_TYPE_GSO: 1524 pkt->extra.u.gso = ext->u.gso; 1525 break; 1526 default: 1527 /* 1528 * The reference Linux netfront driver will 1529 * never set any other extra.type. So we don't 1530 * know what to do with it. Let's print an 1531 * error, then consume and discard the packet 1532 */ 1533 printf("xnb(%s:%d): Unknown extra info type %d." 1534 " Discarding packet\n", 1535 __func__, __LINE__, pkt->extra.type); 1536 xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, 1537 start)); 1538 xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, 1539 idx)); 1540 discard = 1; 1541 break; 1542 } 1543 1544 pkt->extra.flags = ext->flags; 1545 if (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE) { 1546 /* 1547 * The reference linux netfront driver never sets this 1548 * flag (nor does any other known netfront). So we 1549 * will discard the packet. 1550 */ 1551 printf("xnb(%s:%d): Request sets " 1552 "XEN_NETIF_EXTRA_FLAG_MORE, but we can't handle " 1553 "that\n", __func__, __LINE__); 1554 xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start)); 1555 xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx)); 1556 discard = 1; 1557 } 1558 1559 idx++; 1560 } 1561 1562 /* Set cdr. 
If there is not more data, cdr is invalid */ 1563 pkt->cdr = idx; 1564 1565 /* Loop on remainder of packet */ 1566 while (more_data && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) { 1567 netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx); 1568 pkt->list_len++; 1569 cdr_size += tx->size; 1570 if (tx->flags & ~NETTXF_more_data) { 1571 /* There should be no other flags set at this point */ 1572 printf("xnb(%s:%d): Request sets unknown flags %d " 1573 "after the 1st request in the packet.\n", 1574 __func__, __LINE__, tx->flags); 1575 xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start)); 1576 xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx)); 1577 } 1578 1579 more_data = tx->flags & NETTXF_more_data; 1580 idx++; 1581 } 1582 1583 /* Finalize packet */ 1584 if (more_data != 0) { 1585 /* The ring ran out of requests before finishing the packet */ 1586 xnb_pkt_invalidate(pkt); 1587 idx = start; /* tell caller that we consumed no requests */ 1588 } else { 1589 /* Calculate car_size */ 1590 pkt->car_size = pkt->size - cdr_size; 1591 } 1592 if (discard != 0) { 1593 xnb_pkt_invalidate(pkt); 1594 } 1595 1596 return idx - start; 1597} 1598 1599 1600/** 1601 * Respond to all the requests that constituted pkt. Builds the responses and 1602 * writes them to the ring, but doesn't push them to the shared ring. 1603 * \param[in] pkt the packet that needs a response 1604 * \param[in] error true if there was an error handling the packet, such 1605 * as in the hypervisor copy op or mbuf allocation 1606 * \param[out] ring Responses go here 1607 */ 1608static void 1609xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring, 1610 int error) 1611{ 1612 /* 1613 * Outline: 1614 * 1) Respond to the first request 1615 * 2) Respond to the extra info reques 1616 * Loop through every remaining request in the packet, generating 1617 * responses that copy those requests' ids and sets the status 1618 * appropriately. 1619 */ 1620 netif_tx_request_t *tx; 1621 netif_tx_response_t *rsp; 1622 int i; 1623 uint16_t status; 1624 1625 status = (xnb_pkt_is_valid(pkt) == 0) || error ? 1626 NETIF_RSP_ERROR : NETIF_RSP_OKAY; 1627 KASSERT((pkt->list_len == 0) || (ring->rsp_prod_pvt == pkt->car), 1628 ("Cannot respond to ring requests out of order")); 1629 1630 if (pkt->list_len >= 1) { 1631 uint16_t id; 1632 tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt); 1633 id = tx->id; 1634 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); 1635 rsp->id = id; 1636 rsp->status = status; 1637 ring->rsp_prod_pvt++; 1638 1639 if (pkt->flags & NETRXF_extra_info) { 1640 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); 1641 rsp->status = NETIF_RSP_NULL; 1642 ring->rsp_prod_pvt++; 1643 } 1644 } 1645 1646 for (i=0; i < pkt->list_len - 1; i++) { 1647 uint16_t id; 1648 tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt); 1649 id = tx->id; 1650 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); 1651 rsp->id = id; 1652 rsp->status = status; 1653 ring->rsp_prod_pvt++; 1654 } 1655} 1656 1657/** 1658 * Create an mbuf chain to represent a packet. Initializes all of the headers 1659 * in the mbuf chain, but does not copy the data. The returned chain must be 1660 * free()'d when no longer needed 1661 * \param[in] pkt A packet to model the mbuf chain after 1662 * \return A newly allocated mbuf chain, possibly with clusters attached. 
/**
 * Create an mbuf chain to represent a packet.  Initializes all of the headers
 * in the mbuf chain, but does not copy the data.  The returned chain must be
 * m_freem()'d when no longer needed
 * \param[in] pkt	A packet to model the mbuf chain after
 * \return	A newly allocated mbuf chain, possibly with clusters attached.
 *		NULL on failure
 */
static struct mbuf*
xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp)
{
	/**
	 * \todo consider using a memory pool for mbufs instead of
	 *	 reallocating them for every packet
	 */
	/** \todo handle extra data */
	struct mbuf *m;

	m = m_getm(NULL, pkt->size, M_NOWAIT, MT_DATA);

	if (m != NULL) {
		m->m_pkthdr.rcvif = ifp;
		if (pkt->flags & NETTXF_data_validated) {
			/*
			 * We lie to the host OS and always tell it that the
			 * checksums are ok, because the packet is unlikely to
			 * get corrupted going across domains.
			 */
			m->m_pkthdr.csum_flags = (
				CSUM_IP_CHECKED |
				CSUM_IP_VALID |
				CSUM_DATA_VALID |
				CSUM_PSEUDO_HDR
				);
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	return m;
}

/**
 * Build a gnttab_copy table that can be used to copy data from a pkt
 * to an mbufc.  Does not actually perform the copy.  Always uses gref's on
 * the packet side.
 * \param[in] pkt	pkt's associated requests form the src for
 *			the copy operation
 * \param[in] mbufc	mbufc's storage forms the dest for the copy operation
 * \param[out] gnttab	Storage for the returned grant table
 * \param[in] txb	Pointer to the backend ring structure
 * \param[in] otherend_id	The domain ID of the other end of the copy
 * \return		The number of gnttab entries filled
 */
static int
xnb_txpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
		 gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb,
		 domid_t otherend_id)
{

	const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
	int gnt_idx = 0;		/* index into grant table */
	RING_IDX r_idx = pkt->car;	/* index into tx ring buffer */
	int r_ofs = 0;	/* offset of next data within tx request's data area */
	int m_ofs = 0;	/* offset of next data within mbuf's data area */
	/* size in bytes that still needs to be represented in the table */
	uint16_t size_remaining = pkt->size;

	while (size_remaining > 0) {
		const netif_tx_request_t *txq = RING_GET_REQUEST(txb, r_idx);
		const size_t mbuf_space = M_TRAILINGSPACE(mbuf) - m_ofs;
		const size_t req_size =
		    r_idx == pkt->car ? pkt->car_size : txq->size;
		const size_t pkt_space = req_size - r_ofs;
		/*
		 * space is the largest amount of data that can be copied in
		 * the grant table's next entry
		 */
		const size_t space = MIN(pkt_space, mbuf_space);

		/* TODO: handle this error condition without panicking */
		KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));

		gnttab[gnt_idx].source.u.ref = txq->gref;
		gnttab[gnt_idx].source.domid = otherend_id;
		gnttab[gnt_idx].source.offset = txq->offset + r_ofs;
		gnttab[gnt_idx].dest.u.gmfn = virt_to_mfn(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].dest.offset = virt_to_offset(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].dest.domid = DOMID_SELF;
		gnttab[gnt_idx].len = space;
		gnttab[gnt_idx].flags = GNTCOPY_source_gref;

		gnt_idx++;
		r_ofs += space;
		m_ofs += space;
		size_remaining -= space;
		if (req_size - r_ofs <= 0) {
			/* Must move to the next tx request */
			r_ofs = 0;
			r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
		}
		if (M_TRAILINGSPACE(mbuf) - m_ofs <= 0) {
			/* Must move to the next mbuf */
			m_ofs = 0;
			mbuf = mbuf->m_next;
		}
	}

	return gnt_idx;
}
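/*
 * Worked example (illustrative): a single 1514-byte request whose data
 * lands in an mbuf with only 1000 bytes of trailing space left produces
 * two table entries:
 *
 *	entry 0: len=1000, source.offset=txq->offset,      dest=mbuf A
 *	entry 1: len=514,  source.offset=txq->offset+1000, dest=mbuf B
 *
 * i.e. the loop advances r_ofs and m_ofs independently, so one request
 * may span several mbufs and one mbuf may hold several requests.
 */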
/**
 * Check the status of the grant copy operations, and update the mbufs'
 * various non-data fields to reflect the data present.
 * \param[in,out] mbufc	mbuf chain to update.  The chain must be valid and of
 *			the correct length, and data should already be present
 * \param[in] gnttab	A grant table for a just completed copy op
 * \param[in] n_entries The number of valid entries in the grant table
 */
static void
xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab,
		 int n_entries)
{
	struct mbuf *mbuf = mbufc;
	int i;
	size_t total_size = 0;

	for (i = 0; i < n_entries; i++) {
		KASSERT(gnttab[i].status == GNTST_okay,
		    ("Some gnttab_copy entry had error status %hd\n",
		    gnttab[i].status));

		mbuf->m_len += gnttab[i].len;
		total_size += gnttab[i].len;
		if (M_TRAILINGSPACE(mbuf) <= 0) {
			mbuf = mbuf->m_next;
		}
	}
	mbufc->m_pkthdr.len = total_size;

#if defined(INET) || defined(INET6)
	xnb_add_mbuf_cksum(mbufc);
#endif
}
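/*
 * Worked example (illustrative): after a copy described by three table
 * entries of len 2048, 2048, and 904, where the first mbuf's cluster has
 * room for the first two, xnb_update_mbufc() leaves m_len=4096 on the
 * first mbuf, m_len=904 on the second, and m_pkthdr.len=5000 on the
 * chain head.
 */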
/**
 * Dequeue at most one packet from the shared ring
 * \param[in,out] txb	Netif tx ring.  A packet will be removed from it, and
 *			its private indices will be updated.  But the indices
 *			will not be pushed to the shared ring.
 * \param[in] ifnet	Interface to which the packet will be sent
 * \param[in] otherend	Domain ID of the other end of the ring
 * \param[out] mbufc	The assembled mbuf chain, ready to send to the generic
 *			networking stack
 * \param[in,out] gnttab Pointer to enough memory for a grant table.  We make
 *			this a function parameter so that we will take less
 *			stack space.
 * \return		An error code
 */
static int
xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc,
	 struct ifnet *ifnet, gnttab_copy_table gnttab)
{
	struct xnb_pkt pkt;
	/* number of tx requests consumed to build the last packet */
	int num_consumed;
	int nr_ents;

	*mbufc = NULL;
	num_consumed = xnb_ring2pkt(&pkt, txb, txb->req_cons);
	if (num_consumed == 0)
		return 0;	/* Nothing to receive */

	/* update statistics independent of errors */
	if_inc_counter(ifnet, IFCOUNTER_IPACKETS, 1);

	/*
	 * if we got here, then 1 or more requests were consumed, but the
	 * packet is not necessarily valid.
	 */
	if (xnb_pkt_is_valid(&pkt) == 0) {
		/* got a garbage packet, respond and drop it */
		xnb_txpkt2rsp(&pkt, txb, 1);
		txb->req_cons += num_consumed;
		DPRINTF("xnb_intr: garbage packet, num_consumed=%d\n",
		    num_consumed);
		if_inc_counter(ifnet, IFCOUNTER_IERRORS, 1);
		return EINVAL;
	}

	*mbufc = xnb_pkt2mbufc(&pkt, ifnet);

	if (*mbufc == NULL) {
		/*
		 * Couldn't allocate mbufs.  Respond and drop the packet.  Do
		 * not consume the requests
		 */
		xnb_txpkt2rsp(&pkt, txb, 1);
		DPRINTF("xnb_intr: Couldn't allocate mbufs, num_consumed=%d\n",
		    num_consumed);
		if_inc_counter(ifnet, IFCOUNTER_IQDROPS, 1);
		return ENOMEM;
	}

	nr_ents = xnb_txpkt2gnttab(&pkt, *mbufc, gnttab, txb, otherend);

	if (nr_ents > 0) {
		int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
		    gnttab, nr_ents);
		KASSERT(hv_ret == 0,
		    ("HYPERVISOR_grant_table_op returned %d\n", hv_ret));
		xnb_update_mbufc(*mbufc, gnttab, nr_ents);
	}

	xnb_txpkt2rsp(&pkt, txb, 0);
	txb->req_cons += num_consumed;
	return 0;
}

/**
 * Create an xnb_pkt based on the contents of an mbuf chain.
 * \param[in] mbufc	mbuf chain to transform into a packet
 * \param[out] pkt	Storage for the newly generated xnb_pkt
 * \param[in] start	The ring index of the first available slot in the rx
 *			ring
 * \param[in] space	The number of free slots in the rx ring
 * \retval 0		Success
 * \retval EINVAL	mbufc was corrupt or not convertible into a pkt
 * \retval EAGAIN	There was not enough space in the ring to queue the
 *			packet
 */
static int
xnb_mbufc2pkt(const struct mbuf *mbufc, struct xnb_pkt *pkt,
	      RING_IDX start, int space)
{

	int retval = 0;

	if ((mbufc == NULL) ||
	    ((mbufc->m_flags & M_PKTHDR) == 0) ||
	    (mbufc->m_pkthdr.len == 0)) {
		xnb_pkt_invalidate(pkt);
		retval = EINVAL;
	} else {
		int slots_required;

		xnb_pkt_validate(pkt);
		pkt->flags = 0;
		pkt->size = mbufc->m_pkthdr.len;
		pkt->car = start;
		pkt->car_size = mbufc->m_len;

		if (mbufc->m_pkthdr.csum_flags & CSUM_TSO) {
			pkt->flags |= NETRXF_extra_info;
			pkt->extra.u.gso.size = mbufc->m_pkthdr.tso_segsz;
			pkt->extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			pkt->extra.u.gso.pad = 0;
			pkt->extra.u.gso.features = 0;
			pkt->extra.type = XEN_NETIF_EXTRA_TYPE_GSO;
			pkt->extra.flags = 0;
			pkt->cdr = start + 2;
		} else {
			pkt->cdr = start + 1;
		}
		if (mbufc->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_DELAY_DATA)) {
			pkt->flags |=
			    (NETRXF_csum_blank | NETRXF_data_validated);
		}

		/*
		 * Each ring response can have up to PAGE_SIZE of data.
		 * Assume that we can defragment the mbuf chain efficiently
		 * into responses so that each response but the last uses all
		 * PAGE_SIZE bytes.
		 */
		pkt->list_len = (pkt->size + PAGE_SIZE - 1) / PAGE_SIZE;

		if (pkt->list_len > 1) {
			pkt->flags |= NETRXF_more_data;
		}

		slots_required = pkt->list_len +
		    (pkt->flags & NETRXF_extra_info ? 1 : 0);
		if (slots_required > space) {
			xnb_pkt_invalidate(pkt);
			retval = EAGAIN;
		}
	}

	return retval;
}
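/*
 * Worked example (illustrative, assuming PAGE_SIZE is 4096): a
 * 9000-byte chain with CSUM_TSO set, start=20, space=5 yields
 *	list_len       = (9000 + 4095) / 4096 = 3
 *	cdr            = 22 (slot 21 is reserved for the GSO extra info)
 *	flags          = NETRXF_extra_info | NETRXF_more_data |
 *			 NETRXF_csum_blank | NETRXF_data_validated
 *	slots_required = 3 + 1 = 4, which fits in space=5, so it returns 0.
 */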
/**
 * Build a gnttab_copy table that can be used to copy data from an mbuf chain
 * to the frontend's shared buffers.  Does not actually perform the copy.
 * Always uses gref's on the other end's side.
 * \param[in] pkt	pkt's associated responses form the dest for the copy
 *			operation
 * \param[in] mbufc	The source for the copy operation
 * \param[out] gnttab	Storage for the returned grant table
 * \param[in] rxb	Pointer to the backend ring structure
 * \param[in] otherend_id	The domain ID of the other end of the copy
 * \return		The number of gnttab entries filled
 */
static int
xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
		 gnttab_copy_table gnttab, const netif_rx_back_ring_t *rxb,
		 domid_t otherend_id)
{

	const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
	int gnt_idx = 0;		/* index into grant table */
	RING_IDX r_idx = pkt->car;	/* index into rx ring buffer */
	int r_ofs = 0;	/* offset of next data within rx request's data area */
	int m_ofs = 0;	/* offset of next data within mbuf's data area */
	/* size in bytes that still needs to be represented in the table */
	uint16_t size_remaining;

	size_remaining = (xnb_pkt_is_valid(pkt) != 0) ? pkt->size : 0;

	while (size_remaining > 0) {
		const netif_rx_request_t *rxq = RING_GET_REQUEST(rxb, r_idx);
		const size_t mbuf_space = mbuf->m_len - m_ofs;
		/* Xen shared pages have an implied size of PAGE_SIZE */
		const size_t req_size = PAGE_SIZE;
		const size_t pkt_space = req_size - r_ofs;
		/*
		 * space is the largest amount of data that can be copied in
		 * the grant table's next entry
		 */
		const size_t space = MIN(pkt_space, mbuf_space);

		/* TODO: handle this error condition without panicking */
		KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));

		gnttab[gnt_idx].dest.u.ref = rxq->gref;
		gnttab[gnt_idx].dest.domid = otherend_id;
		gnttab[gnt_idx].dest.offset = r_ofs;
		gnttab[gnt_idx].source.u.gmfn = virt_to_mfn(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].source.offset = virt_to_offset(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].source.domid = DOMID_SELF;
		gnttab[gnt_idx].len = space;
		gnttab[gnt_idx].flags = GNTCOPY_dest_gref;

		gnt_idx++;

		r_ofs += space;
		m_ofs += space;
		size_remaining -= space;
		if (req_size - r_ofs <= 0) {
			/* Must move to the next rx request */
			r_ofs = 0;
			r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
		}
		if (mbuf->m_len - m_ofs <= 0) {
			/* Must move to the next mbuf */
			m_ofs = 0;
			mbuf = mbuf->m_next;
		}
	}

	return gnt_idx;
}
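/*
 * Worked example (illustrative, assuming PAGE_SIZE is 4096): a
 * 6000-byte packet stored in two mbufs of 4000 and 2000 bytes produces
 * three table entries across two rx requests (each request maps one
 * page-sized buffer):
 *
 *	entry 0: gref of request 0, dest.offset=0,    len=4000
 *	entry 1: gref of request 0, dest.offset=4000, len=96
 *	entry 2: gref of request 1, dest.offset=0,    len=1904
 */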
/**
 * Generates responses for all the requests that constituted pkt.  Builds
 * responses and writes them to the ring, but doesn't push the shared ring
 * indices.
 * \param[in] pkt	the packet that needs a response
 * \param[in] gnttab	The grant copy table corresponding to this packet.
 *			Used to determine how many netif_rx_response_t's to
 *			generate.
 * \param[in] n_entries	Number of relevant entries in the grant table
 * \param[out] ring	Responses go here
 * \return		The number of RX requests that were consumed to
 *			generate the responses
 */
static int
xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab,
	      int n_entries, netif_rx_back_ring_t *ring)
{
	/*
	 * This code makes the following assumptions:
	 *	* All entries in gnttab set GNTCOPY_dest_gref
	 *	* The entries in gnttab are grouped by their grefs: any two
	 *	  entries with the same gref must be adjacent
	 */
	int error = 0;
	int gnt_idx, i;
	int n_responses = 0;
	grant_ref_t last_gref = GRANT_REF_INVALID;
	RING_IDX r_idx;

	KASSERT(gnttab != NULL, ("Received a null grant table copy"));

	/*
	 * In the event of an error, we only need to send one response to the
	 * netfront.  In that case, we mustn't write any data to the responses
	 * after the one we send.  So we must loop all the way through gnttab
	 * looking for errors before we generate any responses
	 *
	 * Since we're looping through the grant table anyway, we'll count the
	 * number of different gref's in it, which will tell us how many
	 * responses to generate
	 */
	for (gnt_idx = 0; gnt_idx < n_entries; gnt_idx++) {
		int16_t status = gnttab[gnt_idx].status;
		if (status != GNTST_okay) {
			DPRINTF(
			    "Got error %d for hypervisor gnttab_copy status\n",
			    status);
			error = 1;
			break;
		}
		if (gnttab[gnt_idx].dest.u.ref != last_gref) {
			n_responses++;
			last_gref = gnttab[gnt_idx].dest.u.ref;
		}
	}

	if (error != 0) {
		uint16_t id;
		netif_rx_response_t *rsp;

		id = RING_GET_REQUEST(ring, ring->rsp_prod_pvt)->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = NETIF_RSP_ERROR;
		n_responses = 1;
	} else {
		gnt_idx = 0;
		const int has_extra = pkt->flags & NETRXF_extra_info;

		if (has_extra != 0)
			n_responses++;

		for (i = 0; i < n_responses; i++) {
			netif_rx_request_t rxq;
			netif_rx_response_t *rsp;

			r_idx = ring->rsp_prod_pvt + i;
			/*
			 * We copy the structure of rxq instead of making a
			 * pointer because it shares the same memory as rsp.
			 */
			rxq = *(RING_GET_REQUEST(ring, r_idx));
			rsp = RING_GET_RESPONSE(ring, r_idx);
			if (has_extra && (i == 1)) {
				netif_extra_info_t *ext =
				    (netif_extra_info_t*)rsp;
				ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
				ext->flags = 0;
				ext->u.gso.size = pkt->extra.u.gso.size;
				ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
				ext->u.gso.pad = 0;
				ext->u.gso.features = 0;
			} else {
				rsp->id = rxq.id;
				rsp->status = GNTST_okay;
				rsp->offset = 0;
				rsp->flags = 0;
				if (i < pkt->list_len - 1)
					rsp->flags |= NETRXF_more_data;
				if ((i == 0) && has_extra)
					rsp->flags |= NETRXF_extra_info;
				if ((i == 0) &&
				    (pkt->flags & NETRXF_data_validated)) {
					rsp->flags |= NETRXF_data_validated;
					rsp->flags |= NETRXF_csum_blank;
				}
				rsp->status = 0;
				for (; gnttab[gnt_idx].dest.u.ref == rxq.gref;
				    gnt_idx++) {
					rsp->status += gnttab[gnt_idx].len;
				}
			}
		}
	}

	ring->req_cons += n_responses;
	ring->rsp_prod_pvt += n_responses;
	return n_responses;
}
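/*
 * Note that for successful rx responses, ->status carries the byte count
 * for that response rather than an error code.  Continuing the example
 * above, entries 0 and 1 share a gref and so collapse into one response
 * with status 4096, while entry 2 yields a second response with status
 * 1904.
 */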
#if defined(INET) || defined(INET6)
/**
 * Add IP, TCP, and/or UDP checksums to every mbuf in a chain.  The first mbuf
 * in the chain must start with a struct ether_header.
 *
 * XXX This function will perform incorrectly on UDP packets that are split up
 * into multiple ethernet frames.
 */
static void
xnb_add_mbuf_cksum(struct mbuf *mbufc)
{
	struct ether_header *eh;
	struct ip *iph;
	uint16_t ether_type;

	eh = mtod(mbufc, struct ether_header*);
	ether_type = ntohs(eh->ether_type);
	if (ether_type != ETHERTYPE_IP) {
		/* Nothing to calculate */
		return;
	}

	iph = (struct ip*)(eh + 1);
	if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
		iph->ip_sum = 0;
		iph->ip_sum = in_cksum_hdr(iph);
	}

	switch (iph->ip_p) {
	case IPPROTO_TCP:
		if (mbufc->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			size_t tcplen = ntohs(iph->ip_len) - sizeof(struct ip);
			struct tcphdr *th = (struct tcphdr*)(iph + 1);
			th->th_sum = in_pseudo(iph->ip_src.s_addr,
			    iph->ip_dst.s_addr, htons(IPPROTO_TCP + tcplen));
			th->th_sum = in_cksum_skip(mbufc,
			    sizeof(struct ether_header) + ntohs(iph->ip_len),
			    sizeof(struct ether_header) + (iph->ip_hl << 2));
		}
		break;
	case IPPROTO_UDP:
		if (mbufc->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			size_t udplen = ntohs(iph->ip_len) - sizeof(struct ip);
			struct udphdr *uh = (struct udphdr*)(iph + 1);
			uh->uh_sum = in_pseudo(iph->ip_src.s_addr,
			    iph->ip_dst.s_addr, htons(IPPROTO_UDP + udplen));
			uh->uh_sum = in_cksum_skip(mbufc,
			    sizeof(struct ether_header) + ntohs(iph->ip_len),
			    sizeof(struct ether_header) + (iph->ip_hl << 2));
		}
		break;
	default:
		break;
	}
}
#endif /* INET || INET6 */
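/*
 * The transport checksums above are computed in two steps: in_pseudo()
 * seeds th_sum/uh_sum with the IPv4 pseudo-header sum (source and
 * destination addresses plus protocol and length), and in_cksum_skip()
 * then folds in the TCP/UDP header and payload, skipping over the
 * ethernet and IP headers.
 */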
static void
xnb_stop(struct xnb_softc *xnb)
{
	struct ifnet *ifp;

	mtx_assert(&xnb->sc_lock, MA_OWNED);
	ifp = xnb->xnb_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
}

static int
xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct xnb_softc *xnb = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq*) data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr*)data;
#endif
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		mtx_lock(&xnb->sc_lock);
		if (ifp->if_flags & IFF_UP) {
			xnb_ifinit_locked(xnb);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xnb_stop(xnb);
			}
		}
		/*
		 * Note: netfront sets a variable named xn_if_flags
		 * here, but that variable is never read
		 */
		mtx_unlock(&xnb->sc_lock);
		break;
	case SIOCSIFADDR:
#ifdef INET
		mtx_lock(&xnb->sc_lock);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ifp->if_drv_flags &= ~(IFF_DRV_RUNNING |
				    IFF_DRV_OACTIVE);
				if_link_state_change(ifp, LINK_STATE_DOWN);
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
				ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
				if_link_state_change(ifp, LINK_STATE_UP);
			}
			arp_ifinit(ifp, ifa);
			mtx_unlock(&xnb->sc_lock);
		} else {
			mtx_unlock(&xnb->sc_lock);
#endif
			error = ether_ioctl(ifp, cmd, data);
#ifdef INET
		}
#endif
		break;
	case SIOCSIFCAP:
		mtx_lock(&xnb->sc_lock);
		if (ifr->ifr_reqcap & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			ifp->if_hwassist |= XNB_CSUM_FEATURES;
		} else {
			ifp->if_capenable &= ~(IFCAP_TXCSUM);
			ifp->if_hwassist &= ~(XNB_CSUM_FEATURES);
		}
		if ((ifr->ifr_reqcap & IFCAP_RXCSUM)) {
			ifp->if_capenable |= IFCAP_RXCSUM;
		} else {
			ifp->if_capenable &= ~(IFCAP_RXCSUM);
		}
		/*
		 * TODO enable TSO4 and LRO once we no longer need
		 * to calculate checksums in software
		 */
#if 0
		if (ifr->ifr_reqcap & IFCAP_TSO4) {
			if ((IFCAP_TXCSUM & ifp->if_capenable) == 0) {
				printf("xnb: Xen netif requires that "
				    "TXCSUM be enabled in order "
				    "to use TSO4\n");
				error = EINVAL;
			} else {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			}
		} else {
			ifp->if_capenable &= ~(IFCAP_TSO4);
			ifp->if_hwassist &= ~(CSUM_TSO);
		}
		if (ifr->ifr_reqcap & IFCAP_LRO) {
			ifp->if_capenable |= IFCAP_LRO;
		} else {
			ifp->if_capenable &= ~(IFCAP_LRO);
		}
#endif
		mtx_unlock(&xnb->sc_lock);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		xnb_ifinit(xnb);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &xnb->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
xnb_start_locked(struct ifnet *ifp)
{
	netif_rx_back_ring_t *rxb;
	struct xnb_softc *xnb;
	struct mbuf *mbufc;
	RING_IDX req_prod_local;

	xnb = ifp->if_softc;
	rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;

	if (!xnb->carrier)
		return;

	do {
		int out_of_space = 0;
		int notify;
		req_prod_local = rxb->sring->req_prod;
		xen_rmb();
		for (;;) {
			int error;

			IF_DEQUEUE(&ifp->if_snd, mbufc);
			if (mbufc == NULL)
				break;
			error = xnb_send(rxb, xnb->otherend_id, mbufc,
			    xnb->rx_gnttab);
			switch (error) {
			case EAGAIN:
				/*
				 * Insufficient space in the ring.
				 * Requeue pkt and send when space is
				 * available.
				 */
				IF_PREPEND(&ifp->if_snd, mbufc);
				/*
				 * Perhaps the frontend missed an IRQ
				 * and went to sleep.  Notify it to wake
				 * it up.
				 */
				out_of_space = 1;
				break;

			case EINVAL:
				/* OS gave a corrupt packet.  Drop it.*/
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				/* FALLTHROUGH */
			default:
				/* Send succeeded, or packet had error.
				 * Free the packet */
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
				if (mbufc)
					m_freem(mbufc);
				break;
			}
			if (out_of_space != 0)
				break;
		}

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(rxb, notify);
		if ((notify != 0) || (out_of_space != 0))
			xen_intr_signal(xnb->xen_intr_handle);
		rxb->sring->req_event = req_prod_local + 1;
		xen_mb();
	} while (rxb->sring->req_prod != req_prod_local);
}
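/*
 * The do/while above implements the standard shared-ring wakeup
 * protocol: after draining the queue, req_event is set one past the
 * last request seen, then req_prod is re-checked.  If the frontend
 * queued more requests in the window before req_event was written, the
 * loop runs again instead of waiting for an event that would never
 * arrive.
 */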
/**
 * Sends one packet to the ring.  Does not block; if the ring lacks space,
 * the packet is not enqueued and EAGAIN is returned.
 * \param[in] mbufc	Contains one packet to send.  Caller must free
 * \param[in,out] ring	The packet will be pushed onto this ring, but the
 *			otherend will not be notified.
 * \param[in] otherend	The domain ID of the other end of the connection
 * \param[in,out] gnttab Pointer to enough memory for a grant table.  We make
 *			this a function parameter so that we will take less
 *			stack space.
 * \retval 0		Success
 * \retval EAGAIN	The ring did not have enough space for the packet.
 *			The ring has not been modified
 * \retval EINVAL	mbufc was corrupt or not convertible into a pkt
 */
static int
xnb_send(netif_rx_back_ring_t *ring, domid_t otherend,
	 const struct mbuf *mbufc, gnttab_copy_table gnttab)
{
	struct xnb_pkt pkt;
	int error, n_entries;
	RING_IDX space;

	space = ring->sring->req_prod - ring->req_cons;
	error = xnb_mbufc2pkt(mbufc, &pkt, ring->rsp_prod_pvt, space);
	if (error != 0)
		return error;
	n_entries = xnb_rxpkt2gnttab(&pkt, mbufc, gnttab, ring, otherend);
	if (n_entries != 0) {
		int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
		    gnttab, n_entries);
		KASSERT(hv_ret == 0, ("HYPERVISOR_grant_table_op returned %d\n",
		    hv_ret));
	}

	xnb_rxpkt2rsp(&pkt, gnttab, n_entries, ring);

	return 0;
}

static void
xnb_start(struct ifnet *ifp)
{
	struct xnb_softc *xnb;

	xnb = ifp->if_softc;
	mtx_lock(&xnb->rx_lock);
	xnb_start_locked(ifp);
	mtx_unlock(&xnb->rx_lock);
}

/* equivalent of network_open() in Linux */
static void
xnb_ifinit_locked(struct xnb_softc *xnb)
{
	struct ifnet *ifp;

	ifp = xnb->xnb_ifp;

	mtx_assert(&xnb->sc_lock, MA_OWNED);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	xnb_stop(xnb);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);
}

static void
xnb_ifinit(void *xsc)
{
	struct xnb_softc *xnb = xsc;

	mtx_lock(&xnb->sc_lock);
	xnb_ifinit_locked(xnb);
	mtx_unlock(&xnb->sc_lock);
}

/**
 * Callback used by the generic networking code to tell us when our carrier
 * state has changed.  Since we don't have a physical carrier, we don't care
 */
static int
xnb_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}

/**
 * Callback used by the generic networking code to ask us what our carrier
 * state is.  Since we don't have a physical carrier, this is very simple
 */
static void
xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/*---------------------------- NewBus Registration ---------------------------*/
static device_method_t xnb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xnb_probe),
	DEVMETHOD(device_attach,	xnb_attach),
	DEVMETHOD(device_detach,	xnb_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	xnb_suspend),
	DEVMETHOD(device_resume,	xnb_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, xnb_frontend_changed),

	{ 0, 0 }
};

static driver_t xnb_driver = {
	"xnb",
	xnb_methods,
	sizeof(struct xnb_softc),
};
devclass_t xnb_devclass;

DRIVER_MODULE(xnb, xenbusb_back, xnb_driver, xnb_devclass, 0, 0);

/*-------------------------- Unit Tests -------------------------------------*/
#ifdef XNB_DEBUG
#include "netback_unit_tests.c"
#endif