/*-
 * Copyright (c) 2016, Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/* Driver for the ptnet paravirtualized network device. */

#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <machine/smp.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>

#ifdef WITH_PTNETMAP

#ifndef INET
#error "INET not defined, cannot support offloadings"
#endif

#if __FreeBSD_version >= 1100000
static uint64_t	ptnet_get_counter(if_t, ift_counter);
#else
typedef struct ifnet *if_t;
#define if_getsoftc(_ifp)   (_ifp)->if_softc
#endif

//#define PTNETMAP_STATS
//#define DEBUG
#ifdef DEBUG
#define DBG(x) x
#else   /* !DEBUG */
#define DBG(x)
#endif  /* !DEBUG */

extern int ptnet_vnet_hdr; /* Tunable parameter */

struct ptnet_softc;

struct ptnet_queue_stats {
	uint64_t	packets; /* if_[io]packets */
	uint64_t	bytes;	 /* if_[io]bytes */
	uint64_t	errors;	 /* if_[io]errors */
	uint64_t	iqdrops; /* if_iqdrops */
	uint64_t	mcasts;  /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
	uint64_t	intrs;
	uint64_t	kicks;
#endif /* PTNETMAP_STATS */
};

struct ptnet_queue {
	struct ptnet_softc		*sc;
	struct resource			*irq;
	void				*cookie;
	int				kring_id;
	struct nm_csb_atok		*atok;
	struct nm_csb_ktoa		*ktoa;
	unsigned int			kick;
	struct mtx			lock;
	struct buf_ring			*bufring; /* for TX queues */
	struct ptnet_queue_stats	stats;
#ifdef PTNETMAP_STATS
	struct ptnet_queue_stats	last_stats;
#endif /* PTNETMAP_STATS */
	struct taskqueue		*taskq;
	struct task			task;
	char				lock_name[16];
};

#define PTNET_Q_LOCK(_pq)	mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq)	mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq)	mtx_unlock(&(_pq)->lock)

struct ptnet_softc {
	device_t		dev;
	if_t			ifp;
	struct ifmedia		media;
	struct mtx		lock;
	char			lock_name[16];
	char			hwaddr[ETHER_ADDR_LEN];

	/* Mirror of the PTFEAT register. */
	uint32_t		ptfeatures;
	unsigned int		vnet_hdr_len;

	/* PCI BARs support. */
	struct resource		*iomem;
	struct resource		*msix_mem;

	unsigned int		num_rings;
	unsigned int		num_tx_rings;
	struct ptnet_queue	*queues;
	struct ptnet_queue	*rxqueues;
	struct nm_csb_atok	*csb_gh;
	struct nm_csb_ktoa	*csb_hg;

	unsigned int		min_tx_space;

	struct netmap_pt_guest_adapter *ptna;

	struct callout		tick;
#ifdef PTNETMAP_STATS
	struct timeval		last_ts;
#endif /* PTNETMAP_STATS */
};

#define PTNET_CORE_LOCK(_sc)	mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)

static int	ptnet_probe(device_t);
static int	ptnet_attach(device_t);
static int	ptnet_detach(device_t);
static int	ptnet_suspend(device_t);
static int	ptnet_resume(device_t);
static int	ptnet_shutdown(device_t);

static void	ptnet_init(void *opaque);
static int	ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int	ptnet_init_locked(struct ptnet_softc *sc);
static int	ptnet_stop(struct ptnet_softc *sc);
static int	ptnet_transmit(if_t ifp, struct mbuf *m);
static int	ptnet_drain_transmit_queue(struct ptnet_queue *pq,
					   unsigned int budget,
					   bool may_resched);
static void	ptnet_qflush(if_t ifp);
static void	ptnet_tx_task(void *context, int pending);

static int	ptnet_media_change(if_t ifp);
static void	ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void	ptnet_tick(void *opaque);
#endif

static int	ptnet_irqs_init(struct ptnet_softc *sc);
static void	ptnet_irqs_fini(struct ptnet_softc *sc);

static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd);
static int	ptnet_nm_config(struct netmap_adapter *na,
				struct nm_config_info *info);
static void	ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int	ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int	ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int	ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
static void	ptnet_nm_intr(struct netmap_adapter *na, int onoff);

/* Forward declarations of the remaining netmap callbacks, which are
 * referenced by ptnet_attach() and ptnet_init_locked() before their
 * definitions. */
static int	ptnet_nm_krings_create(struct netmap_adapter *na);
static void	ptnet_nm_krings_delete(struct netmap_adapter *na);
static void	ptnet_nm_dtor(struct netmap_adapter *na);

static void	ptnet_tx_intr(void *opaque);
static void	ptnet_rx_intr(void *opaque);

static unsigned	ptnet_rx_discard(struct netmap_kring *kring,
				 unsigned int head);
static int	ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
			     bool may_resched);
static void	ptnet_rx_task(void *context, int pending);
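/*
 * Layout of the Communication Status Block (CSB) allocated in
 * ptnet_attach() below: two physically contiguous pages, the first
 * holding the guest-to-host entries and the second the host-to-guest
 * ones, indexed by ring (TX rings first, then RX rings):
 *
 *	csb_gh -> [ nm_csb_atok #0 | nm_csb_atok #1 | ... ]  (one page)
 *	csb_hg -> [ nm_csb_ktoa #0 | nm_csb_ktoa #1 | ... ]  (one page)
 *
 * Queue i uses csb_gh + i and csb_hg + i. Each base address is exported
 * to the hypervisor by writing the high half (BAH) before the low half
 * (BAL); presumably the BAL write is what triggers the host-side
 * mapping, hence the required ordering.
 */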
#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif

static device_method_t ptnet_methods[] = {
	DEVMETHOD(device_probe,		ptnet_probe),
	DEVMETHOD(device_attach,	ptnet_attach),
	DEVMETHOD(device_detach,	ptnet_detach),
	DEVMETHOD(device_suspend,	ptnet_suspend),
	DEVMETHOD(device_resume,	ptnet_resume),
	DEVMETHOD(device_shutdown,	ptnet_shutdown),
	DEVMETHOD_END
};

static driver_t ptnet_driver = {
	"ptnet",
	ptnet_methods,
	sizeof(struct ptnet_softc)
};

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
static devclass_t ptnet_devclass;
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
		      NULL, NULL, SI_ORDER_MIDDLE + 2);

static int
ptnet_probe(device_t dev)
{
	if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
	    pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
		return (ENXIO);
	}

	device_set_desc(dev, "ptnet network adapter");

	return (BUS_PROBE_DEFAULT);
}

static inline void
ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
	pq->stats.kicks++;
#endif /* PTNETMAP_STATS */
	bus_write_4(pq->sc->iomem, pq->kick, 0);
}

#define PTNET_BUF_RING_SIZE	4096
#define PTNET_RX_BUDGET		512
#define PTNET_RX_BATCH		1
#define PTNET_TX_BUDGET		512
#define PTNET_TX_BATCH		64
#define PTNET_HDR_SIZE		sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE	65536

#define PTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP)
#define PTNET_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
#define PTNET_ALL_OFFLOAD	(CSUM_TSO | PTNET_CSUM_OFFLOAD |\
				 PTNET_CSUM_OFFLOAD_IPV6)
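/*
 * Each queue has its own 4-byte "kick" doorbell register in the I/O BAR,
 * at offset PTNET_IO_KICK_BASE + 4 * queue_index (see the queue setup
 * loop in ptnet_attach() below). E.g. with 2 TX and 2 RX rings, queue #3
 * (the second RX queue) is kicked by writing to PTNET_IO_KICK_BASE + 12.
 * The value written (0 here) presumably carries no information; the
 * write access itself is the notification.
 */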
static int
ptnet_attach(device_t dev)
{
	uint32_t ptfeatures = 0;
	unsigned int num_rx_rings, num_tx_rings;
	struct netmap_adapter na_arg;
	unsigned int nifp_offset;
	struct ptnet_softc *sc;
	if_t ifp;
	uint32_t macreg;
	int err, rid;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Setup PCI resources. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
	sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
					   RF_ACTIVE);
	if (sc->iomem == NULL) {
		device_printf(dev, "Failed to map I/O BAR\n");
		return (ENXIO);
	}

	/* Negotiate features with the hypervisor. */
	if (ptnet_vnet_hdr) {
		ptfeatures |= PTNETMAP_F_VNET_HDR;
	}
	bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
	ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
	sc->ptfeatures = ptfeatures;

	num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	sc->num_rings = num_tx_rings + num_rx_rings;
	sc->num_tx_rings = num_tx_rings;

	if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) {
		device_printf(dev, "CSB cannot handle that many rings (%u)\n",
			      sc->num_rings);
		err = ENOMEM;
		goto err_path;
	}

	/* Allocate the CSB and carry out the CSB allocation protocol. */
	sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
				  (size_t)0, -1UL, PAGE_SIZE, 0);
	if (sc->csb_gh == NULL) {
		device_printf(dev, "Failed to allocate CSB\n");
		err = ENOMEM;
		goto err_path;
	}
	sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE);

	{
		/*
		 * We use uint64_t rather than vm_paddr_t since we
		 * need 64 bit addresses even on 32 bit platforms.
		 */
		uint64_t paddr = vtophys(sc->csb_gh);

		/* CSB allocation protocol: write to BAH first, then
		 * to BAL (for both GH and HG sections). */
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH,
			    (paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL,
			    paddr & 0xffffffff);
		paddr = vtophys(sc->csb_hg);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH,
			    (paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL,
			    paddr & 0xffffffff);
	}

	/* Allocate and initialize per-queue data structures. */
	sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
			    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->queues == NULL) {
		err = ENOMEM;
		goto err_path;
	}
	sc->rxqueues = sc->queues + num_tx_rings;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		pq->sc = sc;
		pq->kring_id = i;
		pq->kick = PTNET_IO_KICK_BASE + 4 * i;
		pq->atok = sc->csb_gh + i;
		pq->ktoa = sc->csb_hg + i;
		snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
			 device_get_nameunit(dev), i);
		mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
		if (i >= num_tx_rings) {
			/* RX queue: fix kring_id. */
			pq->kring_id -= num_tx_rings;
		} else {
			/* TX queue: allocate buf_ring. */
			pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
						M_DEVBUF, M_NOWAIT, &pq->lock);
			if (pq->bufring == NULL) {
				err = ENOMEM;
				goto err_path;
			}
		}
	}

	sc->min_tx_space = 64; /* Safe initial value. */

	err = ptnet_irqs_init(sc);
	if (err) {
		goto err_path;
	}

	/* Setup Ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Failed to allocate ifnet\n");
		err = ENOMEM;
		goto err_path;
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	ifp->if_init = ptnet_init;
	ifp->if_ioctl = ptnet_ioctl;
#if __FreeBSD_version >= 1100000
	ifp->if_get_counter = ptnet_get_counter;
#endif
	ifp->if_transmit = ptnet_transmit;
	ifp->if_qflush = ptnet_qflush;

	ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
		     ptnet_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);

	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
	sc->hwaddr[0] = (macreg >> 8) & 0xff;
	sc->hwaddr[1] = macreg & 0xff;
	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
	sc->hwaddr[2] = (macreg >> 24) & 0xff;
	sc->hwaddr[3] = (macreg >> 16) & 0xff;
	sc->hwaddr[4] = (macreg >> 8) & 0xff;
	sc->hwaddr[5] = macreg & 0xff;
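	/*
	 * Example of the register packing just decoded: for the MAC
	 * address 00:16:3e:aa:bb:cc, PTNET_IO_MAC_HI holds 0x0016 in its
	 * low 16 bits and PTNET_IO_MAC_LO holds 0x3eaabbcc, so the byte
	 * extraction above reconstructs the address in network order.
	 */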
	ether_ifattach(ifp, sc->hwaddr);

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
		/* Similarly to what the vtnet driver does, we can emulate
		 * VLAN offloadings by inserting and removing the 802.1Q
		 * header during transmit and receive. We are then able
		 * to do checksum offloading of VLAN frames. */
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
					| IFCAP_VLAN_HWCSUM
					| IFCAP_TSO | IFCAP_LRO
					| IFCAP_VLAN_HWTSO
					| IFCAP_VLAN_HWTAGGING;
	}

	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	/* Don't enable polling by default. */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	snprintf(sc->lock_name, sizeof(sc->lock_name),
		 "%s", device_get_nameunit(dev));
	mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
	callout_init_mtx(&sc->tick, &sc->lock, 0);

	/* Prepare a netmap_adapter struct instance to do netmap_attach(). */
	nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
	memset(&na_arg, 0, sizeof(na_arg));
	na_arg.ifp = ifp;
	na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	na_arg.num_tx_rings = num_tx_rings;
	na_arg.num_rx_rings = num_rx_rings;
	na_arg.nm_config = ptnet_nm_config;
	na_arg.nm_krings_create = ptnet_nm_krings_create;
	na_arg.nm_krings_delete = ptnet_nm_krings_delete;
	na_arg.nm_dtor = ptnet_nm_dtor;
	na_arg.nm_intr = ptnet_nm_intr;
	na_arg.nm_register = ptnet_nm_register;
	na_arg.nm_txsync = ptnet_nm_txsync;
	na_arg.nm_rxsync = ptnet_nm_rxsync;

	netmap_pt_guest_attach(&na_arg, nifp_offset,
			       bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));

	/* Now a netmap adapter for this ifp has been allocated, and it
	 * can be accessed through NA(ifp). We also have to initialize the
	 * CSB pointer. */
	sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);

	/* If the virtio-net header was negotiated, set the virt_hdr_len
	 * field in the netmap adapter, to inform users that this netmap
	 * adapter requires the application to deal with the headers. */
	ptnet_update_vnet_hdr(sc);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);

err_path:
	ptnet_detach(dev);
	return err;
}

/* Stop the host sync-kloop if it was running, and deregister the CSB. */
static void
ptnet_device_shutdown(struct ptnet_softc *sc)
{
	ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
	bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);
}
static int
ptnet_detach(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);
	int i;

	ptnet_device_shutdown(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		ether_poll_deregister(sc->ifp);
	}
#endif
	callout_drain(&sc->tick);

	if (sc->queues) {
		/* Drain taskqueues before calling if_detach. */
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (pq->taskq) {
				taskqueue_drain(pq->taskq, &pq->task);
			}
		}
	}

	if (sc->ifp) {
		ether_ifdetach(sc->ifp);

		/* Uninitialize netmap adapters for this device. */
		netmap_detach(sc->ifp);

		ifmedia_removeall(&sc->media);
		if_free(sc->ifp);
		sc->ifp = NULL;
	}

	ptnet_irqs_fini(sc);

	if (sc->csb_gh) {
		contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);
		sc->csb_gh = NULL;
		sc->csb_hg = NULL;
	}

	if (sc->queues) {
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (mtx_initialized(&pq->lock)) {
				mtx_destroy(&pq->lock);
			}
			if (pq->bufring != NULL) {
				buf_ring_free(pq->bufring, M_DEVBUF);
			}
		}
		free(sc->queues, M_DEVBUF);
		sc->queues = NULL;
	}

	if (sc->iomem) {
		bus_release_resource(dev, SYS_RES_IOPORT,
				     PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);
		sc->iomem = NULL;
	}

	mtx_destroy(&sc->lock);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);
}

static int
ptnet_suspend(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	(void)sc;

	return (0);
}

static int
ptnet_resume(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	(void)sc;

	return (0);
}

static int
ptnet_shutdown(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	ptnet_device_shutdown(sc);

	return (0);
}

static int
ptnet_irqs_init(struct ptnet_softc *sc)
{
	int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
	int nvecs = sc->num_rings;
	device_t dev = sc->dev;
	int err = ENOSPC;
	int cpu_cur;
	int i;

	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
		device_printf(dev, "Could not find MSI-X capability\n");
		return (ENXIO);
	}

	sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					      &rid, RF_ACTIVE);
	if (sc->msix_mem == NULL) {
		device_printf(dev, "Failed to allocate MSI-X PCI BAR\n");
		return (ENXIO);
	}

	if (pci_msix_count(dev) < nvecs) {
		device_printf(dev, "Not enough MSI-X vectors\n");
		goto err_path;
	}

	err = pci_alloc_msix(dev, &nvecs);
	if (err) {
		device_printf(dev, "Failed to allocate MSI-X vectors\n");
		goto err_path;
	}

	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		rid = i + 1;
		pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						 RF_ACTIVE);
		if (pq->irq == NULL) {
			device_printf(dev, "Failed to allocate interrupt "
					   "for queue #%d\n", i);
			err = ENOSPC;
			goto err_path;
		}
	}

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		void (*handler)(void *) = ptnet_tx_intr;

		if (i >= sc->num_tx_rings) {
			handler = ptnet_rx_intr;
		}
		err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
				     NULL /* intr_filter */, handler,
				     pq, &pq->cookie);
		if (err) {
			device_printf(dev, "Failed to register intr handler "
					   "for queue #%d\n", i);
			goto err_path;
		}

		bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
#if 0
		bus_bind_intr(sc->dev, pq->irq, cpu_cur);
#endif
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		void (*handler)(void *context, int pending);

		handler = (i < sc->num_tx_rings) ?
			  ptnet_tx_task : ptnet_rx_task;

		TASK_INIT(&pq->task, 0, handler, pq);
		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
					taskqueue_thread_enqueue, &pq->taskq);
		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
					device_get_nameunit(sc->dev), cpu_cur);
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	return 0;
err_path:
	ptnet_irqs_fini(sc);
	return err;
}
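/*
 * Note on the MSI-X layout set up above: there is exactly one vector per
 * ring (nvecs == num_tx_rings + num_rx_rings), TX rings first, and
 * vector i is wired to queue i through SYS_RES_IRQ rid i + 1. E.g. with
 * 2 TX and 2 RX rings, vector 2 (rid 3) fires ptnet_rx_intr for the
 * first RX queue.
 */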
static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
	device_t dev = sc->dev;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (pq->taskq) {
			taskqueue_free(pq->taskq);
			pq->taskq = NULL;
		}

		if (pq->cookie) {
			bus_teardown_intr(dev, pq->irq, pq->cookie);
			pq->cookie = NULL;
		}

		if (pq->irq) {
			bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
			pq->irq = NULL;
		}
	}

	if (sc->msix_mem) {
		pci_release_msi(dev);

		bus_release_resource(dev, SYS_RES_MEMORY,
				     PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
				     sc->msix_mem);
		sc->msix_mem = NULL;
	}
}

static void
ptnet_init(void *opaque)
{
	struct ptnet_softc *sc = opaque;

	PTNET_CORE_LOCK(sc);
	ptnet_init_locked(sc);
	PTNET_CORE_UNLOCK(sc);
}

static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	device_t dev = sc->dev;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask __unused, err = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags);
		PTNET_CORE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/* Network stack wants the iff to be up. */
			err = ptnet_init_locked(sc);
		} else {
			/* Network stack wants the iff to be down. */
			err = ptnet_stop(sc);
		}
		/* We don't need to do anything to support IFF_PROMISC,
		 * since that is managed by the backend port. */
		PTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		device_printf(dev, "SIOCSIFCAP %x %x\n",
			      ifr->ifr_reqcap, ifp->if_capenable);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			struct ptnet_queue *pq;
			int i;

			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				err = ether_poll_register(ptnet_poll, ifp);
				if (err) {
					break;
				}
				/* Stop queues and sync with taskqueues. */
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					/* Make sure the worker sees the
					 * IFF_DRV_RUNNING down. */
					PTNET_Q_LOCK(pq);
					pq->atok->appl_need_kick = 0;
					PTNET_Q_UNLOCK(pq);
					/* Wait for rescheduling to finish. */
					if (pq->taskq) {
						taskqueue_drain(pq->taskq,
								&pq->task);
					}
				}
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
			} else {
				err = ether_poll_deregister(ifp);
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					PTNET_Q_LOCK(pq);
					pq->atok->appl_need_kick = 1;
					PTNET_Q_UNLOCK(pq);
				}
			}
		}
#endif /* DEVICE_POLLING */
		ifp->if_capenable = ifr->ifr_reqcap;
		break;

	case SIOCSIFMTU:
		/* We support any reasonable MTU. */
		if (ifr->ifr_mtu < ETHERMIN ||
				ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
			err = EINVAL;
		} else {
			PTNET_CORE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			PTNET_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return err;
}
static int
ptnet_init_locked(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	unsigned int nm_buf_size;
	int ret;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return 0; /* nothing to do */
	}

	device_printf(sc->dev, "%s\n", __func__);

	/* Translate offload capabilities according to if_capenable. */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= PTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_IP6_TSO;

	/*
	 * Prepare the interface for netmap mode access.
	 */
	netmap_update_config(na_dr);

	ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
	if (ret) {
		device_printf(sc->dev, "netmap_mem_finalize() failed\n");
		return ret;
	}

	if (sc->ptna->backend_users == 0) {
		ret = ptnet_nm_krings_create(na_nm);
		if (ret) {
			device_printf(sc->dev, "ptnet_nm_krings_create() "
					       "failed\n");
			goto err_mem_finalize;
		}

		ret = netmap_mem_rings_create(na_dr);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_rings_create() "
					       "failed\n");
			goto err_rings_create;
		}

		ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_get_lut() "
					       "failed\n");
			goto err_get_lut;
		}
	}

	ret = ptnet_nm_register(na_dr, 1 /* on */);
	if (ret) {
		goto err_register;
	}

	nm_buf_size = NETMAP_BUF_SIZE(na_dr);

	KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
	sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
	device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
		      sc->min_tx_space);
#ifdef PTNETMAP_STATS
	callout_reset(&sc->tick, hz, ptnet_tick, sc);
#endif

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return 0;

err_register:
	memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
err_get_lut:
	netmap_mem_rings_delete(na_dr);
err_rings_create:
	ptnet_nm_krings_delete(na_nm);
err_mem_finalize:
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return ret;
}
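/*
 * Worked example for the min_tx_space computation above: with the common
 * 2048-byte netmap buffers, min_tx_space = 65536 / 2048 + 2 = 34 slots,
 * i.e. enough room for a maximum-size (64 KiB) packet, with two extra
 * slots to absorb the virtio-net header and integer-division rounding.
 */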
/* To be called under the core lock. */
static int
ptnet_stop(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	int i;

	device_printf(sc->dev, "%s\n", __func__);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		return 0; /* nothing to do */
	}

	/* Clear the driver-ready flag, and synchronize with all the queues,
	 * so that after this loop we are sure nobody is still working on
	 * the device. This scheme is taken from the vtnet driver. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&sc->tick);
	for (i = 0; i < sc->num_rings; i++) {
		PTNET_Q_LOCK(sc->queues + i);
		PTNET_Q_UNLOCK(sc->queues + i);
	}

	ptnet_nm_register(na_dr, 0 /* off */);

	if (sc->ptna->backend_users == 0) {
		netmap_mem_rings_delete(na_dr);
		ptnet_nm_krings_delete(na_nm);
	}
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return 0;
}
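/*
 * The lock/unlock pass in ptnet_stop() is a quiescing barrier, not a
 * data access: any worker that saw IFF_DRV_RUNNING set was holding its
 * queue lock at the time, so acquiring and releasing every queue lock
 * after clearing the flag waits out all such critical sections. A
 * minimal sketch of the pattern:
 *
 *	flag = 0;                    // writer clears the flag...
 *	for each queue q:
 *		lock(q); unlock(q);  // ...then waits out current holders
 *
 * Workers re-check the flag under the lock before touching the rings.
 */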
static void
ptnet_qflush(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int i;

	/* Flush all the bufrings and do the interface flush. */
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct mbuf *m;

		PTNET_Q_LOCK(pq);
		if (pq->bufring) {
			while ((m = buf_ring_dequeue_sc(pq->bufring))) {
				m_freem(m);
			}
		}
		PTNET_Q_UNLOCK(pq);
	}

	if_qflush(ifp);
}

static int
ptnet_media_change(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ifmedia *ifm = &sc->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		return EINVAL;
	}

	return 0;
}

#if __FreeBSD_version >= 1100000
static uint64_t
ptnet_get_counter(if_t ifp, ift_counter cnt)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue_stats stats[2];
	int i;

	/* Accumulate statistics over the queues: TX queues into stats[0],
	 * RX queues into stats[1]. */
	memset(stats, 0, sizeof(stats));
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		int idx = (i < sc->num_tx_rings) ? 0 : 1;

		stats[idx].packets	+= pq->stats.packets;
		stats[idx].bytes	+= pq->stats.bytes;
		stats[idx].errors	+= pq->stats.errors;
		stats[idx].iqdrops	+= pq->stats.iqdrops;
		stats[idx].mcasts	+= pq->stats.mcasts;
	}

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (stats[1].packets);
	case IFCOUNTER_IQDROPS:
		return (stats[1].iqdrops);
	case IFCOUNTER_IERRORS:
		return (stats[1].errors);
	case IFCOUNTER_OPACKETS:
		return (stats[0].packets);
	case IFCOUNTER_OBYTES:
		return (stats[0].bytes);
	case IFCOUNTER_OMCASTS:
		return (stats[0].mcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
#endif

#ifdef PTNETMAP_STATS
/* Called under the core lock. */
static void
ptnet_tick(void *opaque)
{
	struct ptnet_softc *sc = opaque;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct ptnet_queue_stats cur = pq->stats;
		struct timeval now;
		unsigned int delta;

		microtime(&now);
		delta = now.tv_usec - sc->last_ts.tv_usec +
			(now.tv_sec - sc->last_ts.tv_sec) * 1000000;
		delta /= 1000; /* in milliseconds */

		if (delta == 0)
			continue;

		device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
			      "intr %lu\n", i, delta,
			      (cur.packets - pq->last_stats.packets),
			      (cur.kicks - pq->last_stats.kicks),
			      (cur.intrs - pq->last_stats.intrs));
		pq->last_stats = cur;
	}
	microtime(&sc->last_ts);
	callout_schedule(&sc->tick, hz);
}
#endif /* PTNETMAP_STATS */

static void
ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	/* We are always active, as the backend netmap port is
	 * always open in netmap mode. */
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}

static uint32_t
ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd)
{
	/*
	 * Write a command and read back the error status,
	 * with zero meaning success.
	 */
	bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
	return bus_read_4(sc->iomem, PTNET_IO_PTCTL);
}
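/*
 * The PTCTL register behaves as a synchronous command/status doorbell:
 * the write executes the command on the host side and the following
 * read returns its outcome. Typical usage, as in ptnet_nm_register()
 * below:
 *
 *	if (ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE) != 0) {
 *		// the host refused to start the sync kloop: bail out
 *	}
 */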
static int
ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);

	info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n",
		      info->num_tx_rings, info->num_rx_rings,
		      info->num_tx_descs, info->num_rx_descs,
		      info->rx_buf_maxsize);

	return 0;
}

static void
ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
{
	int i;

	/* Sync krings from the host, reading from
	 * the CSB. */
	for (i = 0; i < sc->num_rings; i++) {
		struct nm_csb_atok *atok = sc->queues[i].atok;
		struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa;
		struct netmap_kring *kring;

		if (i < na->num_tx_rings) {
			kring = na->tx_rings[i];
		} else {
			kring = na->rx_rings[i - na->num_tx_rings];
		}
		kring->rhead = kring->ring->head = atok->head;
		kring->rcur = kring->ring->cur = atok->cur;
		kring->nr_hwcur = ktoa->hwcur;
		kring->nr_hwtail = kring->rtail =
			kring->ring->tail = ktoa->hwtail;

		nm_prdis("%d: csb {hc %u h %u c %u ht %u}", i,
			 ktoa->hwcur, atok->head, atok->cur,
			 ktoa->hwtail);
		nm_prdis("%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
			 i, kring->nr_hwcur, kring->rhead, kring->rcur,
			 kring->ring->head, kring->ring->cur, kring->nr_hwtail,
			 kring->rtail, kring->ring->tail);
	}
}

static void
ptnet_update_vnet_hdr(struct ptnet_softc *sc)
{
	unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;

	bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
	sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
	sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
}
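/*
 * Example of the negotiation performed by ptnet_update_vnet_hdr() above:
 * with ptnet_vnet_hdr enabled, the driver requests PTNET_HDR_SIZE bytes
 * (sizeof(struct virtio_net_hdr_mrg_rxbuf), i.e. 12) and reads back what
 * the host acknowledged. From then on every packet in the TX and RX
 * netmap rings is prefixed by a header of that length, and netmap
 * applications learn about it through virt_hdr_len.
 */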
static int
ptnet_nm_register(struct netmap_adapter *na, int onoff)
{
	/* device-specific */
	if_t ifp = na->ifp;
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int native = (na == &sc->ptna->hwup.up);
	struct ptnet_queue *pq;
	int ret = 0;
	int i;

	if (!onoff) {
		sc->ptna->backend_users--;
	}

	/* If this is the last netmap client, the guest interrupt enable
	 * flags may be in an arbitrary state. Since these flags are going
	 * to be used also by the netdevice driver, we have to make sure to
	 * start with notifications enabled. Also, any packets pending in
	 * the RX rings must be flushed by the RX taskqueues, since we will
	 * not receive further interrupts until these are processed. */
	if (native && !onoff && na->active_fds == 0) {
		nm_prinf("Exit netmap mode, re-enable interrupts");
		for (i = 0; i < sc->num_rings; i++) {
			pq = sc->queues + i;
			pq->atok->appl_need_kick = 1;
		}
	}

	if (onoff) {
		if (sc->ptna->backend_users == 0) {
			/* Initialize notification enable fields in the CSB. */
			for (i = 0; i < sc->num_rings; i++) {
				pq = sc->queues + i;
				pq->ktoa->kern_need_kick = 1;
				pq->atok->appl_need_kick =
					(!(ifp->if_capenable & IFCAP_POLLING)
						&& i >= sc->num_tx_rings);
			}

			/* Set the virtio-net header length. */
			ptnet_update_vnet_hdr(sc);

			/* Make sure the host adapter passed through is ready
			 * for txsync/rxsync. */
			ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE);
			if (ret) {
				return ret;
			}

			/* Align the guest krings and rings to the state stored
			 * in the CSB. */
			ptnet_sync_from_csb(sc, na);
		}

		/* If not native, don't call nm_set_native_flags, since we
		 * don't want to replace the if_transmit method, nor set
		 * NAF_NETMAP_ON. */
		if (native) {
			netmap_krings_mode_commit(na, onoff);
			nm_set_native_flags(na);
		}

	} else {
		if (native) {
			nm_clear_native_flags(na);
			netmap_krings_mode_commit(na, onoff);
		}

		if (sc->ptna->backend_users == 0) {
			ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
		}
	}

	if (onoff) {
		sc->ptna->backend_users++;
	}

	return ret;
}

static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->queues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static void
ptnet_nm_intr(struct netmap_adapter *na, int onoff)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		pq->atok->appl_need_kick = onoff;
	}
}

static void
ptnet_tx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;

	DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs++;
#endif /* PTNETMAP_STATS */

	if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
		return;
	}

	/* Schedule the taskqueue to process the pending transmit requests.
	 * However, vtnet, if_em and if_igb just call ptnet_transmit() here,
	 * at least when using MSI-X interrupts. The if_em driver, instead,
	 * schedules the taskqueue when using legacy interrupts. */
	taskqueue_enqueue(pq->taskq, &pq->task);
}

static void
ptnet_rx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;
	unsigned int unused;

	DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs++;
#endif /* PTNETMAP_STATS */

	if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
		return;
	}

	/* Like the vtnet, if_igb and if_em drivers when using MSI-X
	 * interrupts, receive-side processing is executed directly in the
	 * interrupt service routine. Alternatively, we may schedule the
	 * taskqueue. */
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

/* The following offloading-related functions are taken from the vtnet
 * driver, but the same functionality is required for the ptnet driver.
 * As a temporary solution, I copied this code from vtnet and I started
 * to generalize it (taking away driver-specific statistic accounting),
 * making as few modifications as possible.
 * In the future we need to share these functions between vtnet and ptnet.
 */
static int
ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start)
{
	struct ether_vlan_header *evh;
	int offset;

	evh = mtod(m, struct ether_vlan_header *);
	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* BMV: We should handle nested VLAN tags too. */
		*etype = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else {
		*etype = ntohs(evh->evl_encap_proto);
		offset = sizeof(struct ether_header);
	}

	switch (*etype) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip, iphdr;

		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
			m_copydata(m, offset, sizeof(struct ip),
				   (caddr_t) &iphdr);
			ip = &iphdr;
		} else
			ip = (struct ip *)(m->m_data + offset);
		*proto = ip->ip_p;
		*start = offset + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		*proto = -1;
		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
		/* Assert the network stack sent us a valid packet. */
		KASSERT(*start > offset,
		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
		     *start, offset, *proto));
		break;
#endif
	default:
		/* Here we should increment the tx_csum_bad_ethtype counter. */
		return (EINVAL);
	}

	return (0);
}
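/*
 * Worked example for ptnet_tx_offload_ctx(): for an untagged TCP/IPv4
 * packet with no IP options, *etype = 0x0800 (ETHERTYPE_IP), *proto =
 * IPPROTO_TCP and *start = 14 + 20 = 34, i.e. the L4 header begins 34
 * bytes into the frame. A VLAN tag would shift that by 4 bytes.
 */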
static int
ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type,
		     int offset, bool allow_ecn, struct virtio_net_hdr *hdr)
{
	static struct timeval lastecn;
	static int curecn;
	struct tcphdr *tcp, tcphdr;

	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
		tcp = &tcphdr;
	} else
		tcp = (struct tcphdr *)(m->m_data + offset);

	hdr->hdr_len = offset + (tcp->th_off << 2);
	hdr->gso_size = m->m_pkthdr.tso_segsz;
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
		VIRTIO_NET_HDR_GSO_TCPV6;

	if (tcp->th_flags & TH_CWR) {
		/*
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
		 * ECN support is not on a per-interface basis, but globally via
		 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
		 */
		if (!allow_ecn) {
			if (ppsratecheck(&lastecn, &curecn, 1))
				if_printf(ifp,
				    "TSO with ECN not negotiated with host\n");
			return (ENOTSUP);
		}
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}

	/* Here we should increment the tx_tso counter. */

	return (0);
}

static struct mbuf *
ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn,
		 struct virtio_net_hdr *hdr)
{
	int flags, etype, csum_start, proto, error;

	flags = m->m_pkthdr.csum_flags;

	error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start);
	if (error)
		goto drop;

	if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) ||
	    (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) {
		/*
		 * We could compare the IP protocol vs the CSUM_ flag too,
		 * but that really should not be necessary.
		 */
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;
		/* Here we should increment the tx_csum counter. */
	}

	if (flags & CSUM_TSO) {
		if (__predict_false(proto != IPPROTO_TCP)) {
			/* Likely failed to correctly parse the mbuf.
			 * Here we should increment the tx_tso_not_tcp
			 * counter. */
			goto drop;
		}

		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
		    ("%s: mbuf %p TSO without checksum offload %#x",
		     __func__, m, flags));

		error = ptnet_tx_offload_tso(ifp, m, etype, csum_start,
					     allow_ecn, hdr);
		if (error)
			goto drop;
	}

	return (m);

drop:
	m_freem(m);
	return (NULL);
}

static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evh;

	evh = mtod(m, struct ether_vlan_header *);
	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
	      ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
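/*
 * What ptnet_vlan_tag_remove() does to the frame layout, e.g.:
 *
 *	before: [dst 6][src 6][0x8100 2][tag 2][type 2][payload...]
 *	after:  [dst 6][src 6][type 2][payload...]
 *
 * The bcopy() shifts the two MAC addresses 4 bytes toward the payload,
 * overwriting the 802.1Q TPID/TCI, and m_adj() trims the 4 now-stale
 * leading bytes; the tag itself survives in m_pkthdr.ether_vtag with
 * M_VLANTAG set.
 */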
/*
 * Use the checksum offset in the VirtIO header to set the
 * correct CSUM_* flags.
 */
static int
ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start,
			struct virtio_net_hdr *hdr)
{
#if defined(INET) || defined(INET6)
	int offset = hdr->csum_start + hdr->csum_offset;
#endif

	/* Only do a basic sanity check on the offset. */
	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP:
		if (__predict_false(offset < ip_start + sizeof(struct ip)))
			return (1);
		break;
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
			return (1);
		break;
#endif
	default:
		/* Here we should increment the rx_csum_bad_ethtype counter. */
		return (1);
	}

	/*
	 * Use the offset to determine the appropriate CSUM_* flags. This is
	 * a bit dirty, but we can get by with it since the checksum offsets
	 * happen to be different. We assume the host does not do IPv4
	 * header checksum offloading.
	 */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	default:
		/* Here we should increment the rx_csum_bad_offset counter. */
		return (1);
	}

	return (0);
}

static int
ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start,
		       struct virtio_net_hdr *hdr)
{
	int offset, proto;

	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip;

		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
			return (1);
		ip = (struct ip *)(m->m_data + ip_start);
		proto = ip->ip_p;
		offset = ip_start + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(m->m_len < ip_start +
				    sizeof(struct ip6_hdr)))
			return (1);
		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
		if (__predict_false(offset < 0))
			return (1);
		break;
#endif
	default:
		/* Here we should increment the rx_csum_bad_ethtype counter. */
		return (1);
	}

	switch (proto) {
	case IPPROTO_TCP:
		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case IPPROTO_UDP:
		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	default:
		/*
		 * For the remaining protocols, FreeBSD does not support
		 * checksum offloading, so the checksum will be recomputed.
		 */
#if 0
		if_printf(ifp, "cksum offload of unsupported "
			  "protocol eth_type=%#x proto=%d csum_start=%d "
			  "csum_offset=%d\n", __func__, eth_type, proto,
			  hdr->csum_start, hdr->csum_offset);
#endif
		break;
	}

	return (0);
}

/*
 * Set the appropriate CSUM_* flags. Unfortunately, the information
 * provided is not directly useful to us. The VirtIO header gives the
 * offset of the checksum, which is all Linux needs, but this is not
 * how FreeBSD does things. We are forced to peek inside the packet
 * a bit.
 *
 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
 * could accept the offsets and let the stack figure it out.
 */
static int
ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	uint16_t eth_type;
	int offset, error;

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		/* BMV: We should handle nested VLAN tags too. */
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else
		offset = sizeof(struct ether_header);

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
		error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr);
	else
		error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr);

	return (error);
}
/* End of offloading-related functions to be shared with vtnet. */
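/*
 * The disambiguation in ptnet_rx_csum_by_offset() works because the two
 * interesting checksum field offsets differ:
 *
 *	offsetof(struct udphdr, uh_sum) == 6
 *	offsetof(struct tcphdr, th_sum) == 16
 *
 * E.g. for TCP over IPv4 with no options, the host sets csum_start = 34
 * and csum_offset = 16, so the checksum lives at byte 50 of the frame;
 * csum_offset alone already identifies the L4 protocol.
 */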
static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
		  unsigned int head, unsigned int sync_flags)
{
	struct netmap_ring *ring = kring->ring;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;

	/* Some packets have been pushed to the netmap ring. We have
	 * to tell the host to process the new packets, updating cur
	 * and head in the CSB. */
	ring->head = ring->cur = head;

	/* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
	kring->rcur = kring->rhead = head;

	nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);

	/* Kick the host if needed. */
	if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
		atok->sync_flags = sync_flags;
		ptnet_kick(pq);
	}
}

#define PTNET_TX_NOSPACE(_h, _k, _min)	\
	((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
		(_k)->rtail - (_h)) < (_min)
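/*
 * PTNET_TX_NOSPACE computes the number of free TX slots between head and
 * the last known tail, modulo the ring size, and compares it against a
 * minimum. Worked example with nkr_num_slots = 256: head = 250 and
 * rtail = 10 give (256 + 10 - 250) = 16 free slots, so with min_tx_space
 * = 34 the macro evaluates true and the driver must resync with the
 * host before transmitting.
 */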
/* This function may be called by the network stack, or by
 * the taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
			   bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	if_t ifp = sc->ifp;
	unsigned int batch_count = 0;
	struct nm_csb_atok *atok;
	struct nm_csb_ktoa *ktoa;
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	unsigned int count = 0;
	unsigned int minspace;
	unsigned int head;
	unsigned int lim;
	struct mbuf *mhead;
	struct mbuf *mf;
	int nmbuf_bytes;
	uint8_t *nmbuf;

	if (!PTNET_Q_TRYLOCK(pq)) {
		/* We failed to acquire the lock, schedule the taskqueue. */
		nm_prlim(1, "Deferring TX work");
		if (may_resched) {
			taskqueue_enqueue(pq->taskq, &pq->task);
		}

		return 0;
	}

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		PTNET_Q_UNLOCK(pq);
		nm_prlim(1, "Interface is down");
		return ENETDOWN;
	}

	atok = pq->atok;
	ktoa = pq->ktoa;
	kring = na->tx_rings[pq->kring_id];
	ring = kring->ring;
	lim = kring->nkr_num_slots - 1;
	head = ring->head;
	minspace = sc->min_tx_space;

	while (count < budget) {
		if (PTNET_TX_NOSPACE(head, kring, minspace)) {
			/* We ran out of slots, let's see if the host has
			 * freed up some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ktoa, kring);

			if (PTNET_TX_NOSPACE(head, kring, minspace)) {
				/* Still no slots available. Reactivate the
				 * interrupts so that we can be notified
				 * when some free slots are made available by
				 * the host. */
				atok->appl_need_kick = 1;

				/* Double check. We need a full barrier to
				 * prevent the store to atok->appl_need_kick
				 * from being reordered with the load from
				 * ktoa->hwcur and ktoa->hwtail (store-load
				 * barrier). */
				nm_stld_barrier();
				ptnet_sync_tail(ktoa, kring);
				if (likely(PTNET_TX_NOSPACE(head, kring,
							    minspace))) {
					break;
				}

				nm_prlim(1, "Found more slots by doublecheck");
				/* More slots were freed before reactivating
				 * the interrupts. */
				atok->appl_need_kick = 0;
			}
		}

		mhead = drbr_peek(ifp, pq->bufring);
		if (!mhead) {
			break;
		}

		/* Initialize transmission state variables. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_bytes = 0;

		/* If needed, prepare the virtio-net header at the beginning
		 * of the first slot. */
		if (have_vnet_hdr) {
			struct virtio_net_hdr *vh =
					(struct virtio_net_hdr *)nmbuf;

			/* For performance, we could replace this memset()
			 * with two 8-bytes-wide writes. */
			memset(nmbuf, 0, PTNET_HDR_SIZE);
			if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
				mhead = ptnet_tx_offload(ifp, mhead, false,
							 vh);
				if (unlikely(!mhead)) {
					/* Packet dropped because errors
					 * occurred while preparing the vnet
					 * header. Let's go ahead with the next
					 * packet. */
					pq->stats.errors++;
					drbr_advance(ifp, pq->bufring);
					continue;
				}
			}
			nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
				 "csum_start %u csum_ofs %u hdr_len = %u "
				 "gso_size %u gso_type %x", __func__,
				 mhead->m_pkthdr.csum_flags, vh->flags,
				 vh->csum_start, vh->csum_offset, vh->hdr_len,
				 vh->gso_size, vh->gso_type);

			nmbuf += PTNET_HDR_SIZE;
			nmbuf_bytes += PTNET_HDR_SIZE;
		}

		for (mf = mhead; mf; mf = mf->m_next) {
			uint8_t *mdata = mf->m_data;
			int mlen = mf->m_len;

			for (;;) {
				int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;

				if (mlen < copy) {
					copy = mlen;
				}
				memcpy(nmbuf, mdata, copy);

				mdata += copy;
				mlen -= copy;
				nmbuf += copy;
				nmbuf_bytes += copy;

				if (!mlen) {
					break;
				}

				slot->len = nmbuf_bytes;
				slot->flags = NS_MOREFRAG;

				head = nm_next(head, lim);
				KASSERT(head != ring->tail,
					("Unexpectedly ran out of TX space"));
				slot = ring->slot + head;
				nmbuf = NMB(na, slot);
				nmbuf_bytes = 0;
			}
		}

		/* Complete the last slot and update head. */
		slot->len = nmbuf_bytes;
		slot->flags = 0;
		head = nm_next(head, lim);

		/* Consume the packet just processed. */
		drbr_advance(ifp, pq->bufring);

		/* Copy the packet to listeners. */
		ETHER_BPF_MTAP(ifp, mhead);

		pq->stats.packets++;
		pq->stats.bytes += mhead->m_pkthdr.len;
		if (mhead->m_flags & M_MCAST) {
			pq->stats.mcasts++;
		}

		m_freem(mhead);

		count++;
		if (++batch_count == PTNET_TX_BATCH) {
			ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
			batch_count = 0;
		}
	}

	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
	}

	if (count >= budget && may_resched) {
		DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n",
			     drbr_inuse(ifp, pq->bufring)));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}

	PTNET_Q_UNLOCK(pq);

	return count;
}

static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue *pq;
	unsigned int queue_idx;
	int err;

	DBG(device_printf(sc->dev, "transmit %p\n", m));

	/* Insert 802.1Q header if needed. */
	if (m->m_flags & M_VLANTAG) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if (m == NULL) {
			return ENOBUFS;
		}
		m->m_flags &= ~M_VLANTAG;
	}

	/* Get the flow-id if available. */
	queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
		    m->m_pkthdr.flowid : curcpu;

	if (unlikely(queue_idx >= sc->num_tx_rings)) {
		queue_idx %= sc->num_tx_rings;
	}

	pq = sc->queues + queue_idx;

	err = drbr_enqueue(ifp, pq->bufring, m);
	if (err) {
		/* ENOBUFS when the bufring is full. */
		nm_prlim(1, "%s: drbr_enqueue() failed %d\n",
			 __func__, err);
		pq->stats.errors++;
		return err;
	}

	if (ifp->if_capenable & IFCAP_POLLING) {
		/* If polling is on, the transmit queues will be
		 * drained by the poller. */
		return 0;
	}

	err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

	return (err < 0) ? err : 0;
}
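/*
 * Queue selection example for ptnet_transmit() above: a packet whose
 * RSS hash produced flowid 77 on a device with 4 TX rings lands on
 * queue 77 % 4 = 1; a packet with no flow id falls back to the current
 * CPU id, again reduced modulo the number of TX rings.
 */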
static unsigned int
ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot = ring->slot + head;

	for (;;) {
		head = nm_next(head, kring->nkr_num_slots - 1);
		if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
			break;
		}
		slot = ring->slot + head;
	}

	return head;
}

static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
{
	uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;

	do {
		unsigned int copy;

		if (mtail->m_len == MCLBYTES) {
			struct mbuf *mf;

			mf = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (unlikely(!mf)) {
				return NULL;
			}

			mtail->m_next = mf;
			mtail = mf;
			mdata = mtod(mtail, uint8_t *);
			mtail->m_len = 0;
		}

		copy = MCLBYTES - mtail->m_len;
		if (nmbuf_len < copy) {
			copy = nmbuf_len;
		}

		memcpy(mdata, nmbuf, copy);

		nmbuf += copy;
		nmbuf_len -= copy;
		mdata += copy;
		mtail->m_len += copy;
	} while (nmbuf_len);

	return mtail;
}
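/*
 * Example of how ptnet_rx_slot() grows the chain: copying a 3000-byte
 * netmap slot into an empty chain fills the first 2048-byte cluster
 * (MCLBYTES) and then allocates a second cluster for the remaining 952
 * bytes; a further slot of the same packet keeps appending from the
 * current tail offset.
 */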
static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings[pq->kring_id];
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;
	uint32_t head;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	head = ring->head;
	while (count < budget) {
		uint32_t prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
		int deliver = 1; /* whether to deliver the mbuf to the stack */
host_sync:
		if (head == ring->tail) {
			/* We ran out of slots; let's see whether the host
			 * has added more by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ktoa, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				atok->appl_need_kick = 1;

				/* Double check for more completed RX slots.
				 * We need a full barrier to prevent the store
				 * to atok->appl_need_kick from being reordered
				 * with the load from ktoa->hwcur and
				 * ktoa->hwtail (store-load barrier). */
				nm_stld_barrier();
				ptnet_sync_tail(ktoa, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				atok->appl_need_kick = 0;
			}
		}

		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why the host should
				 * split the header across multiple netmap
				 * slots. If this is the case, discard. */
				nm_prlim(1, "Fragmented vnet-hdr: dropping");
				head = ptnet_rx_discard(kring, head);
				pq->stats.iqdrops++;
				deliver = 0;
				goto skip;
			}
			nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u "
				 "csum_ofs %u hdr_len = %u gso_size %u "
				 "gso_type %x", __func__, vh->flags,
				 vh->csum_start, vh->csum_offset, vh->hdr_len,
				 vh->gso_size, vh->gso_type);
			nmbuf += PTNET_HDR_SIZE;
			nmbuf_len -= PTNET_HDR_SIZE;
		}

		/* Allocate the head of a new mbuf chain.
		 * We use m_getcl() to allocate an mbuf with standard cluster
		 * size (MCLBYTES). In the future we could use m_getjcl()
		 * to choose different sizes. */
		mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(mhead == NULL)) {
			device_printf(sc->dev, "%s: failed to allocate mbuf "
				      "head\n", __func__);
			pq->stats.errors++;
			break;
		}

		/* Initialize the mbuf state variables. */
		mhead->m_pkthdr.len = nmbuf_len;
		mtail->m_len = 0;

		/* Scan all the netmap slots containing the current packet. */
		for (;;) {
			DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
					  "len %u, flags %u\n", __func__,
					  head, ring->tail, slot->len,
					  slot->flags));

			mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
			if (unlikely(!mtail)) {
				/* Ouch. We ran out of memory while processing
				 * a packet. We have to restore the previous
				 * head position, free the mbuf chain, and
				 * schedule the taskqueue to give the packet
				 * another chance. */
				device_printf(sc->dev, "%s: failed to allocate"
					      " mbuf frag, reset head %u --> %u\n",
					      __func__, head, prev_head);
				head = prev_head;
				m_freem(mhead);
				pq->stats.errors++;
				if (may_resched) {
					taskqueue_enqueue(pq->taskq,
							  &pq->task);
				}
				goto escape;
			}

			/* We have to increment head irrespective of whether
			 * NS_MOREFRAG is set. */
			head = nm_next(head, lim);

			if (!(slot->flags & NS_MOREFRAG)) {
				break;
			}

			if (unlikely(head == ring->tail)) {
				/* The very last slot prepared by the host has
				 * NS_MOREFRAG set. Drop it and continue the
				 * outer cycle (to do the double-check). */
				nm_prlim(1, "Incomplete packet: dropping");
				m_freem(mhead);
				pq->stats.iqdrops++;
				goto host_sync;
			}

			slot = ring->slot + head;
			nmbuf = NMB(na, slot);
			nmbuf_len = slot->len;
			mhead->m_pkthdr.len += nmbuf_len;
		}

		mhead->m_pkthdr.rcvif = ifp;
		mhead->m_pkthdr.csum_flags = 0;

		/* Store the queue idx in the packet header. */
		mhead->m_pkthdr.flowid = pq->kring_id;
		M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			struct ether_header *eh;

			eh = mtod(mhead, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				ptnet_vlan_tag_remove(mhead);
				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 * Only touch the vnet header if we actually
				 * have one.
				 */
				if (have_vnet_hdr &&
				    (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
					vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM
					| VIRTIO_NET_HDR_F_DATA_VALID))) {
			if (unlikely(ptnet_rx_csum(mhead, vh))) {
				m_freem(mhead);
				nm_prlim(1, "Csum offload error: dropping");
				pq->stats.iqdrops++;
				deliver = 0;
			}
		}

skip:
		count++;
		if (++batch_count >= PTNET_RX_BATCH) {
			/* Some packets have been (or will be) pushed to the
			 * network stack. We need to update the CSB to tell
			 * the host about the new ring->cur and ring->head
			 * (RX buffer refill). */
			ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
			batch_count = 0;
		}

		if (likely(deliver)) {
			pq->stats.packets++;
			pq->stats.bytes += mhead->m_pkthdr.len;

			PTNET_Q_UNLOCK(pq);
			(*ifp->if_input)(ifp, mhead);
			PTNET_Q_LOCK(pq);
			/* The ring->head index (and related indices) are
			 * updated under pq lock by ptnet_ring_update().
			 * Since we dropped the lock to call if_input(), we
			 * must reload ring->head and restart processing the
			 * ring from there. */
			head = ring->head;

			if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
				/* The interface has gone down while we didn't
				 * have the lock. Stop any processing and
				 * exit. */
				goto unlock;
			}
		}
	}
escape:
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
	}

	if (count >= budget && may_resched) {
		/* If we ran out of budget or the double-check found new
		 * slots to process, schedule the taskqueue. */
		DBG(nm_prlim(1, "out of budget: resched h %u t %u\n",
			     head, ring->tail));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}
unlock:
	PTNET_Q_UNLOCK(pq);

	return count;
}
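
/*
 * Taskqueue handlers.  Both handlers re-run the budgeted processing
 * loops with rescheduling enabled (may_resched == true), so a queue
 * that exhausts its budget re-enqueues its own task and keeps making
 * progress without monopolizing a CPU.
 */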
static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
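
/*
 * Optional polling support (kern_poll(9)).  The handler below is the
 * routine that would be attached with ether_poll_register() when the
 * IFCAP_POLLING capability is toggled; the registration itself lives
 * in the ioctl path, outside this excerpt.  A sketch of that flow,
 * assuming the standard kern_poll(9) API:
 *
 *	if (ifr->ifr_reqcap & IFCAP_POLLING)
 *		error = ether_poll_register(ptnet_poll, ifp);
 *	else
 *		error = ether_poll_deregister(ifp);
 */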
#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS differently from
 * POLL_ONLY, since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	unsigned int queue_budget;
	unsigned int count = 0;
	bool borrow = false;
	int i;

	KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
	queue_budget = MAX(budget / sc->num_rings, 1);
	nm_prlim(1, "Per-queue budget is %d", queue_budget);

	while (budget) {
		unsigned int rcnt = 0;

		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (borrow) {
				queue_budget = MIN(queue_budget, budget);
				if (queue_budget == 0) {
					break;
				}
			}

			if (i < sc->num_tx_rings) {
				rcnt += ptnet_drain_transmit_queue(pq,
						queue_budget, false);
			} else {
				rcnt += ptnet_rx_eof(pq, queue_budget,
						     false);
			}
		}

		if (!rcnt) {
			/* A scan of the queues gave no result; we can
			 * stop here. */
			break;
		}

		if (rcnt > budget) {
			/* This may happen when the initial budget is less
			 * than sc->num_rings, since each queue is given one
			 * packet of budget anyway. Just pretend we didn't
			 * eat that much. */
			rcnt = budget;
		}
		count += rcnt;
		budget -= rcnt;
		borrow = true;
	}

	return count;
}
#endif /* DEVICE_POLLING */
#endif /* WITH_PTNETMAP */