/*
 * Copyright (C) 2013-2016 Vincenzo Maffione
 * Copyright (C) 2013-2016 Luigi Rizzo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	tx descriptors; the exact number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input) on the receive path and put them in the
 *	mbq, from which the netmap receive routines can grab them.
 *
 * TX:
 *	In the generic_netmap_txsync() routine, netmap buffers are copied
 *	(or linked, in the future) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise one roughly every half
 *	ring) have refcount = 1, the others have refcount = 2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
 *
 * RX:
 *	Each mbuf intercepted by generic_rx_handler() is queued on the
 *	per-ring mbq; generic_netmap_rxsync() then dequeues it, copies
 *	its payload into the netmap slots and frees it.
 */
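/*
 * Illustrative trace (a sketch, not compiled code) of the refcount
 * trick described above, following one mbuf that carries the
 * notification event:
 *
 *	generic_set_tx_event():
 *		m = kring->tx_pool[e];		two refs: pool + driver
 *		SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
 *		kring->tx_pool[e] = NULL;
 *		m_freem(m);			drop the pool reference
 *	... the NIC completes transmission, the driver frees m ...
 *	generic_mbuf_destructor(m):
 *		clear kring->tx_event and call netmap_generic_irq(),
 *		waking up processes blocked on poll().
 */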
#ifdef __FreeBSD__

#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD: stable/11/sys/dev/netmap/netmap_generic.c 350007 2019-07-15 20:14:30Z vmaffione $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <machine/bus.h>        /* bus_dmamap_* in netmap_kern.h */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#define MBUF_RXQ(m)	((m)->m_pkthdr.flowid)
#define smp_mb()

#elif defined _WIN32

#include "win_glue.h"

#define MBUF_TXQ(m)	0	/* ((m)->m_pkthdr.flowid) */
#define MBUF_RXQ(m)	0	/* ((m)->m_pkthdr.flowid) */
#define smp_mb()		/* XXX: to be correctly defined */

#else /* linux */

#include "bsd_glue.h"

#include <linux/ethtool.h>      /* struct ethtool_ops, get_ringparam */
#include <linux/hrtimer.h>

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	return alloc_skb(ifp->needed_headroom + len +
			 ifp->needed_tailroom, GFP_ATOMIC);
}

#endif /* linux */


/* Common headers. */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


#define for_each_kring_n(_i, _k, _karr, _n) \
	for ((_k)=*(_karr), (_i) = 0; (_i) < (_n); (_i)++, (_k) = (_karr)[(_i)])

#define for_each_tx_kring(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings)
#define for_each_tx_kring_h(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1)

#define for_each_rx_kring(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings)
#define for_each_rx_kring_h(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1)
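/*
 * Example usage of the iterators above (a sketch): bind _k to each
 * hardware TX kring of the adapter in turn, as the registration code
 * below does when preparing the mbuf pools:
 *
 *	struct netmap_kring *kring;
 *	int r;
 *
 *	for_each_tx_kring(r, kring, na) {
 *		kring->tx_pool = NULL;
 *	}
 *
 * The _h variants iterate one extra position, so that the host ring
 * (index num_tx_rings / num_rx_rings) is also covered.
 */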
/* ======================== PERFORMANCE STATISTICS =========================== */

#ifdef RATE_GENERIC
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long txrepl;
	unsigned long txdrop;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD 2
static void rate_callback(unsigned long arg)
{
	struct rate_context *ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(txrepl);
	RATE_PRINTK(txdrop);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		nm_prerr("mod_timer() failed");
}

static struct rate_context rate_ctx;

void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi)
{
	if (txp) rate_ctx.new.txpkt++;
	if (txs) rate_ctx.new.txsync++;
	if (txi) rate_ctx.new.txirq++;
	if (rxp) rate_ctx.new.rxpkt++;
	if (rxs) rate_ctx.new.rxsync++;
	if (rxi) rate_ctx.new.rxirq++;
}

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* ========== GENERIC (EMULATED) NETMAP ADAPTER SUPPORT ============= */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only NAF_NETMAP_ON instead of NAF_NATIVE_ON to enable the irq.
 */
void
netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
{
	if (unlikely(!nm_netmap_on(na)))
		return;

	netmap_common_irq(na, q, work_done);
#ifdef RATE_GENERIC
	if (work_done)
		rate_ctx.new.rxirq++;
	else
		rate_ctx.new.txirq++;
#endif /* RATE_GENERIC */
}
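/*
 * Calling convention, as used throughout this file: a NULL work_done
 * pointer identifies a TX completion, a non-NULL one an RX interrupt:
 *
 *	netmap_generic_irq(na, r, NULL);	TX ring r completed
 *	netmap_generic_irq(na, r, &work_done);	RX traffic on ring r
 */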
static int
generic_netmap_unregister(struct netmap_adapter *na)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int i, r;

	if (na->active_fds == 0) {
		na->na_flags &= ~NAF_NETMAP_ON;

		/* Stop intercepting packets on the RX path. */
		nm_os_catch_rx(gna, 0);

		/* Release packet steering control. */
		nm_os_catch_tx(gna, 0);
	}

	netmap_krings_mode_commit(na, /*onoff=*/0);

	for_each_rx_kring(r, kring, na) {
		/* Free the mbufs still pending in the RX queues,
		 * that did not end up in the corresponding netmap
		 * RX rings. */
		mbq_safe_purge(&kring->rx_queue);
		nm_os_mitigation_cleanup(&gna->mit[r]);
	}

	/* Decrement the reference counter for the mbufs in the
	 * TX pools. These mbufs may still be pending in the drivers
	 * (e.g. this happens with the virtio-net driver, which
	 * does lazy reclaiming of transmitted mbufs). */
	for_each_tx_kring(r, kring, na) {
		/* We must remove the destructor on the TX event,
		 * because the destructor invokes netmap code, and
		 * the netmap module may disappear before the
		 * TX event is consumed. */
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event) {
			SET_MBUF_DESTRUCTOR(kring->tx_event, NULL);
		}
		kring->tx_event = NULL;
		mtx_unlock_spin(&kring->tx_event_lock);
	}

	if (na->active_fds == 0) {
		nm_os_free(gna->mit);

		for_each_rx_kring(r, kring, na) {
			mbq_safe_fini(&kring->rx_queue);
		}

		for_each_tx_kring(r, kring, na) {
			mtx_destroy(&kring->tx_event_lock);
			if (kring->tx_pool == NULL) {
				continue;
			}

			for (i = 0; i < na->num_tx_desc; i++) {
				if (kring->tx_pool[i]) {
					m_freem(kring->tx_pool[i]);
				}
			}
			nm_os_free(kring->tx_pool);
			kring->tx_pool = NULL;
		}

#ifdef RATE_GENERIC
		if (--rate_ctx.refcount == 0) {
			nm_prinf("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
		nm_prinf("Emulated adapter for %s deactivated", na->name);
	}

	return 0;
}

/* Enable/disable netmap mode for a generic network interface. */
static int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int error;
	int i, r;

	if (!na) {
		return EINVAL;
	}

	if (!enable) {
		/* This is actually an unregif. */
		return generic_netmap_unregister(na);
	}

	if (na->active_fds == 0) {
		nm_prinf("Emulated adapter for %s activated", na->name);
		/* Do all memory allocations when (na->active_fds == 0), to
		 * simplify error management. */

		/* Allocate memory for mitigation support on all the rx queues. */
		gna->mit = nm_os_malloc(na->num_rx_rings * sizeof(struct nm_generic_mit));
		if (!gna->mit) {
			nm_prerr("mitigation allocation failed");
			error = ENOMEM;
			goto out;
		}

		for_each_rx_kring(r, kring, na) {
			/* Init mitigation support. */
			nm_os_mitigation_init(&gna->mit[r], r, na);

			/* Initialize the rx queue, as generic_rx_handler() can
			 * be called as soon as nm_os_catch_rx() returns.
			 */
			mbq_safe_init(&kring->rx_queue);
		}

		/*
		 * Prepare mbuf pools (parallel to the tx rings), for packet
		 * transmission. Don't preallocate the mbufs here, it's simpler
		 * to leave this task to txsync.
		 */
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool = NULL;
		}
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool =
				nm_os_malloc(na->num_tx_desc * sizeof(struct mbuf *));
			if (!kring->tx_pool) {
				nm_prerr("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pools;
			}
			mtx_init(&kring->tx_event_lock, "tx_event_lock",
				 NULL, MTX_SPIN);
		}
	}

	netmap_krings_mode_commit(na, /*onoff=*/1);

	for_each_tx_kring(r, kring, na) {
		/* Initialize tx_pool and tx_event. */
		for (i = 0; i < na->num_tx_desc; i++) {
			kring->tx_pool[i] = NULL;
		}

		kring->tx_event = NULL;
	}

	if (na->active_fds == 0) {
		/* Prepare to intercept incoming traffic. */
		error = nm_os_catch_rx(gna, 1);
		if (error) {
			nm_prerr("nm_os_catch_rx(1) failed (%d)", error);
			goto free_tx_pools;
		}

		/* Let netmap control the packet steering. */
		error = nm_os_catch_tx(gna, 1);
		if (error) {
			nm_prerr("nm_os_catch_tx(1) failed (%d)", error);
			goto catch_rx;
		}

		na->na_flags |= NAF_NETMAP_ON;

#ifdef RATE_GENERIC
		if (rate_ctx.refcount == 0) {
			nm_prinf("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				nm_prerr("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */
	}

	return 0;

	/* Here (na->active_fds == 0) holds. */
catch_rx:
	nm_os_catch_rx(gna, 0);
free_tx_pools:
	for_each_tx_kring(r, kring, na) {
		mtx_destroy(&kring->tx_event_lock);
		if (kring->tx_pool == NULL) {
			continue;
		}
		nm_os_free(kring->tx_pool);
		kring->tx_pool = NULL;
	}
	for_each_rx_kring(r, kring, na) {
		mbq_safe_fini(&kring->rx_queue);
	}
	nm_os_free(gna->mit);
out:
	return error;
}
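/*
 * Note on the error handling above: tx_pool pointers are cleared in a
 * first loop and only allocated in a second one, so the free_tx_pools
 * unwind can walk all TX krings and simply skip the ones still NULL.
 * Sketch with 4 TX rings and an allocation failure on ring 2:
 *
 *	tx_pool[ring 0] = buf	freed by the unwind
 *	tx_pool[ring 1] = buf	freed by the unwind
 *	tx_pool[ring 2] = NULL	allocation failed, skipped
 *	tx_pool[ring 3] = NULL	never reached, skipped
 */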
/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	struct netmap_adapter *na = NA(GEN_TX_MBUF_IFP(m));
	struct netmap_kring *kring;
	unsigned int r = MBUF_TXQ(m);
	unsigned int r_orig = r;

	if (unlikely(!nm_netmap_on(na) || r >= na->num_tx_rings)) {
		nm_prerr("Error: no netmap adapter on device %p",
			 GEN_TX_MBUF_IFP(m));
		return;
	}

	/*
	 * First, clear the event mbuf.
	 * In principle, the event 'm' should match the one stored
	 * on ring 'r'. However we check it explicitly to stay
	 * safe against lower layers (qdisc, driver, etc.) changing
	 * MBUF_TXQ(m) under our feet. If the match is not found
	 * on 'r', we try to see if it belongs to some other ring.
	 */
	for (;;) {
		bool match = false;

		kring = na->tx_rings[r];
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event == m) {
			kring->tx_event = NULL;
			match = true;
		}
		mtx_unlock_spin(&kring->tx_event_lock);

		if (match) {
			if (r != r_orig) {
				nm_prlim(1, "event %p migrated: ring %u --> %u",
					 m, r_orig, r);
			}
			break;
		}

		if (++r == na->num_tx_rings) r = 0;

		if (r == r_orig) {
			nm_prlim(1, "Cannot match event %p", m);
			return;
		}
	}

	/* Second, wake up clients. They will reclaim the event through
	 * txsync. */
	netmap_generic_irq(na, r, NULL);
#ifdef __FreeBSD__
#if __FreeBSD_version <= 1200050
	void_mbuf_dtor(m, NULL, NULL);
#else  /* __FreeBSD_version >= 1200051 */
	void_mbuf_dtor(m);
#endif /* __FreeBSD_version >= 1200051 */
#endif
}

/* Record completed transmissions and update hwtail.
 *
 * The oldest tx buffer not yet completed is at nr_hwtail + 1;
 * nr_hwcur is the first unsent buffer.
 */
static u_int
generic_netmap_tx_clean(struct netmap_kring *kring, int txqdisc)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i = nm_next(kring->nr_hwtail, lim);
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	nm_prdis("hwcur = %d, hwtail = %d", kring->nr_hwcur, kring->nr_hwtail);

	while (nm_i != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[nm_i];

		if (txqdisc) {
			if (m == NULL) {
				/* Nothing to do, this is going
				 * to be replenished. */
				nm_prlim(3, "Is this happening?");

			} else if (MBUF_QUEUED(m)) {
				break; /* Not dequeued yet. */

			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf has been dequeued but is still busy
				 * (refcount is 2).
				 * Leave it to the driver and replenish. */
				m_freem(m);
				tx_pool[nm_i] = NULL;
			}

		} else {
			if (unlikely(m == NULL)) {
				int event_consumed;

				/* This slot was used to place an event. */
				mtx_lock_spin(&kring->tx_event_lock);
				event_consumed = (kring->tx_event == NULL);
				mtx_unlock_spin(&kring->tx_event_lock);
				if (!event_consumed) {
					/* The event has not been consumed yet,
					 * still busy in the driver. */
					break;
				}
				/* The event has been consumed, we can go
				 * ahead. */

			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf is still busy: its refcnt is 2. */
				break;
			}
		}

		n++;
		nm_i = nm_next(nm_i, lim);
	}
	kring->nr_hwtail = nm_prev(nm_i, lim);
	nm_prdis("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail);

	return n;
}
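/*
 * Worked example for the scan above (a sketch): with nkr_num_slots = 8,
 * nr_hwtail = 2 and nr_hwcur = 6, slots 3, 4 and 5 hold mbufs handed to
 * the driver. If the mbufs in slots 3 and 4 have been completed (their
 * refcount is back to 1) while slot 5 is still busy (refcount 2), the
 * loop stops at slot 5, returns n = 2 and advances nr_hwtail to 4.
 */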
/* Compute a slot index halfway between inf and sup, with wraparound.
 */
static inline u_int
ring_middle(u_int inf, u_int sup, u_int lim)
{
	u_int n = lim + 1;
	u_int e;

	if (sup >= inf) {
		e = (sup + inf) / 2;
	} else { /* wrap around */
		e = (sup + n + inf) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		nm_prerr("This cannot happen");
		e = 0;
	}

	return e;
}
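/*
 * Worked examples (lim = 7, so n = 8 slots):
 *
 *	no wrap: inf = 2, sup = 6  ->  e = (6 + 2) / 2 = 4
 *	wrap:	 inf = 6, sup = 2  ->  e = (2 + 8 + 6) / 2 = 8, folded
 *		 back to 8 - 8 = 0, the middle of the sequence 6, 7, 0, 1, 2.
 */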
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	u_int lim = kring->nkr_num_slots - 1;
	struct mbuf *m;
	u_int e;
	u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */

	if (ntc == hwcur) {
		return; /* all buffers are free */
	}

	/*
	 * We have pending packets in the driver between hwtail + 1
	 * and hwcur, and we have to choose one of these slots to
	 * generate a notification.
	 * There is a race, but this is only called within txsync which
	 * does a double check.
	 */
#if 0
	/* Choose a slot in the middle, so that we don't risk ending
	 * up in a situation where the client continuously wakes up,
	 * fills one or a few TX slots and goes to sleep again. */
	e = ring_middle(ntc, hwcur, lim);
#else
	/* Choose the first pending slot, to be safe against driver
	 * reordering mbuf transmissions. */
	e = ntc;
#endif

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* An event is already in place. */
		return;
	}

	mtx_lock_spin(&kring->tx_event_lock);
	if (kring->tx_event) {
		/* An event is already in place. */
		mtx_unlock_spin(&kring->tx_event_lock);
		return;
	}

	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
	kring->tx_event = m;
	mtx_unlock_spin(&kring->tx_event_lock);

	kring->tx_pool[e] = NULL;

	nm_prdis("Request Event at %d mbuf %p refcnt %d", e, m, m ? MBUF_REFCNT(m) : -2);

	/* Decrement the refcount. This will free it if we lose the race
	 * with the driver. */
	m_freem(m);
	smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On Linux this is not done directly, but through dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int ring_nr = kring->ring_id;

	IFRATE(rate_ctx.new.txsync++);

	rmb();

	/*
	 * First part: process new packets to send.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct nm_os_gen_arg a;
		u_int event = -1;

		if (gna->txqdisc && nm_kr_txempty(kring)) {
			/* In txqdisc mode, we ask for a delayed notification,
			 * but only when cur == hwtail, which means that the
			 * client is going to block. */
			event = ring_middle(nm_i, head, lim);
			nm_prdis("Place txqdisc event (hwcur=%u,event=%u,"
				 "head=%u,hwtail=%u)", nm_i, event, head,
				 kring->nr_hwtail);
		}

		a.ifp = ifp;
		a.ring_nr = ring_nr;
		a.head = a.tail = NULL;

		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			void *addr = NMB(na, slot);
			/* device-specific */
			struct mbuf *m;
			int tx_ret;

			NM_CHECK_ADDR_LEN(na, addr, len);

			/* Take an mbuf from the tx pool (replenishing the pool
			 * entry if necessary) and copy in the user packet. */
			m = kring->tx_pool[nm_i];
			if (unlikely(m == NULL)) {
				kring->tx_pool[nm_i] = m =
					nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));
				if (m == NULL) {
					nm_prlim(2, "Failed to replenish mbuf");
					/* Here we could schedule a timer which
					 * retries to replenish after a while,
					 * and notifies the client when it
					 * manages to replenish some slots. In
					 * any case we break early to avoid
					 * crashes. */
					break;
				}
				IFRATE(rate_ctx.new.txrepl++);
			}

			a.m = m;
			a.addr = addr;
			a.len = len;
			a.qevent = (nm_i == event);
			/* When not in txqdisc mode, we should ask for
			 * notifications when NS_REPORT is set, or roughly
			 * every half ring. To optimize this, we set a
			 * notification event when the client runs out of
			 * TX ring space, or when transmission fails. In
			 * the latter case we also break early.
			 */
			tx_ret = nm_os_generic_xmit_frame(&a);
			if (unlikely(tx_ret)) {
				if (!gna->txqdisc) {
					/*
					 * No room for this mbuf in the device driver.
					 * Request a notification FOR A PREVIOUS MBUF,
					 * then call generic_netmap_tx_clean(kring) to do the
					 * double check and see if we can free more buffers.
					 * If there is space continue, else break.
					 * NOTE: the double check is necessary if the problem
					 * occurs in the txsync call after selrecord().
					 * Also, we need some way to tell the caller that not
					 * all buffers were queued onto the device (this was
					 * not a problem with the native netmap driver where
					 * space is preallocated). The bridge has a similar
					 * problem and we solve it there by dropping the
					 * excess packets.
					 */
					generic_set_tx_event(kring, nm_i);
					if (generic_netmap_tx_clean(kring, gna->txqdisc)) {
						/* space now available */
						continue;
					} else {
						break;
					}
				}

				/* In txqdisc mode, the netmap-aware qdisc
				 * queue has the same length as the number of
				 * netmap slots (N). Since tail is advanced
				 * only when packets are dequeued, qdisc
				 * queue overrun cannot happen, so
				 * nm_os_generic_xmit_frame() did not fail
				 * because of that.
				 * However, packets can be dropped because
				 * the carrier is off, or because our qdisc is
				 * being deactivated, or possibly for other
				 * reasons. In these cases, we just let the
				 * packet be dropped. */
				IFRATE(rate_ctx.new.txdrop++);
			}

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			IFRATE(rate_ctx.new.txpkt++);
		}
		if (a.head != NULL) {
			a.addr = NULL;
			nm_os_generic_xmit_frame(&a);
		}
		/* Update hwcur to the next slot to transmit. Here nm_i
		 * is not necessarily head, since we could break early. */
		kring->nr_hwcur = nm_i;
	}

	/*
	 * Second, reclaim completed buffers.
	 */
	if (!gna->txqdisc && (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring))) {
		/* No more available slots? Set a notification event
		 * on a netmap slot that will be cleaned in the future.
		 * No double check is performed, since txsync() will be
		 * called twice by netmap_poll().
		 */
		generic_set_tx_event(kring, nm_i);
	}

	generic_netmap_tx_clean(kring, gna->txqdisc);

	return 0;
}
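/*
 * Note on the batching above: nm_os_generic_xmit_frame() may link the
 * mbufs it is given into the a.head/a.tail chain rather than pushing
 * them out one by one; the trailing call with a.addr == NULL flushes
 * whatever is still queued on the chain.
 */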
/*
 * This handler is registered (through nm_os_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen before it reaches the network stack.
 * Stolen packets are put in a queue from which the
 * generic_netmap_rxsync() callback can extract them.
 * Returns 1 if the packet was stolen, 0 otherwise.
 */
int
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring;
	u_int work_done;
	u_int r = MBUF_RXQ(m); /* receive ring number */

	if (r >= na->num_rx_rings) {
		r = r % na->num_rx_rings;
	}

	kring = na->rx_rings[r];

	if (kring->nr_mode == NKR_NETMAP_OFF) {
		/* We must not intercept this mbuf. */
		return 0;
	}

	/* limit the size of the queue */
	if (unlikely(!gna->rxsg && MBUF_LEN(m) > NETMAP_BUF_SIZE(na))) {
		/* This may happen when GRO/LRO features are enabled for
		 * the NIC driver when the generic adapter does not
		 * support RX scatter-gather. */
		nm_prlim(2, "Warning: driver pushed up big packet "
			 "(size=%d)", (int)MBUF_LEN(m));
		m_freem(m);
	} else if (unlikely(mbq_len(&kring->rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&kring->rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na, r, &work_done);
	} else {
		/* same as send combining: filter the notification if there
		 * is a pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(nm_os_mitigation_active(&gna->mit[r]))) {
			/* Record that there is some pending work. */
			gna->mit[r].mit_pending = 1;
		} else {
			netmap_generic_irq(na, r, &work_done);
			nm_os_mitigation_start(&gna->mit[r]);
		}
	}

	/* We have intercepted the mbuf. */
	return 1;
}
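/*
 * Mitigation example (a sketch; netmap_generic_mit is assumed to hold
 * the mitigation period in nanoseconds): with netmap_generic_mit set
 * to 100000, the first packet of a burst triggers netmap_generic_irq()
 * and arms a 100 us timer; packets arriving while the timer is pending
 * only set mit_pending, and a single coalesced notification is
 * delivered when the timer fires. Values below 32768 bypass mitigation
 * entirely, as the code above shows.
 */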
/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_adapter *na = kring->na;
	u_int nm_i;	/* index into the netmap ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* Adapter-specific variables. */
	u_int nm_buf_len = NETMAP_BUF_SIZE(na);
	struct mbq tmpq;
	struct mbuf *m;
	int avail; /* in bytes */
	int mlen;
	int copy;

	if (head > lim)
		return netmap_ring_reinit(kring);

	IFRATE(rate_ctx.new.rxsync++);

	/*
	 * First part: skip past packets that userspace has released.
	 * This can possibly make room for the second part.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		/* Userspace has released some packets. */
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];

			slot->flags &= ~NS_BUF_CHANGED;
			nm_i = nm_next(nm_i, lim);
		}
		kring->nr_hwcur = head;
	}

	/*
	 * Second part: import newly received packets.
	 */
	if (!netmap_no_pendintr && !force_update) {
		return 0;
	}

	nm_i = kring->nr_hwtail; /* First empty slot in the receive ring. */

	/* Compute the available space (in bytes) in this netmap ring.
	 * The first slot not to be considered is the one before
	 * nr_hwcur. */
	avail = nm_prev(kring->nr_hwcur, lim) - nm_i;
	if (avail < 0)
		avail += lim + 1;
	avail *= nm_buf_len;

	/* First pass: while holding the lock on the RX mbuf queue,
	 * extract as many mbufs as fit in the available space,
	 * and put them in a temporary queue.
	 * To avoid performing a per-mbuf division (mlen / nm_buf_len)
	 * to update avail, we do the update in a while loop that we
	 * also use to set the RX slots, but without performing the copy. */
	mbq_init(&tmpq);
	mbq_lock(&kring->rx_queue);
	for (n = 0;; n++) {
		m = mbq_peek(&kring->rx_queue);
		if (!m) {
			/* No more packets from the driver. */
			break;
		}

		mlen = MBUF_LEN(m);
		if (mlen > avail) {
			/* No more space in the ring. */
			break;
		}

		mbq_dequeue(&kring->rx_queue);

		while (mlen) {
			copy = nm_buf_len;
			if (mlen < copy) {
				copy = mlen;
			}
			mlen -= copy;
			avail -= nm_buf_len;

			ring->slot[nm_i].len = copy;
			ring->slot[nm_i].flags = (mlen ? NS_MOREFRAG : 0);
			nm_i = nm_next(nm_i, lim);
		}

		mbq_enqueue(&tmpq, m);
	}
	mbq_unlock(&kring->rx_queue);

	/* Second pass: drain the temporary queue, going over the used RX
	 * slots, performing the copy outside the RX queue lock. */
	nm_i = kring->nr_hwtail;

	for (;;) {
		void *nmaddr;
		int ofs = 0;
		int morefrag;

		m = mbq_dequeue(&tmpq);
		if (!m) {
			break;
		}

		do {
			nmaddr = NMB(na, &ring->slot[nm_i]);
			/* We only check the address here on generic rx rings. */
			if (nmaddr == NETMAP_BUF_BASE(na)) { /* Bad buffer */
				m_freem(m);
				mbq_purge(&tmpq);
				mbq_fini(&tmpq);
				return netmap_ring_reinit(kring);
			}

			copy = ring->slot[nm_i].len;
			m_copydata(m, ofs, copy, nmaddr);
			ofs += copy;
			morefrag = ring->slot[nm_i].flags & NS_MOREFRAG;
			nm_i = nm_next(nm_i, lim);
		} while (morefrag);

		m_freem(m);
	}

	mbq_fini(&tmpq);

	if (n) {
		kring->nr_hwtail = nm_i;
		IFRATE(rate_ctx.new.rxpkt += n);
	}
	kring->nr_kflags &= ~NKR_PENDINTR;

	return 0;
}
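/*
 * Worked example for the avail computation in generic_netmap_rxsync()
 * (lim = 7): with nr_hwcur = 2 and nr_hwtail = 5, the usable slots are
 * 5, 6, 7 and 0 (slot 1, the one before nr_hwcur, is excluded), so
 * avail = nm_prev(2, 7) - 5 = 1 - 5 = -4, corrected to -4 + 8 = 4
 * slots and then scaled to 4 * nm_buf_len bytes.
 */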
static void
generic_netmap_dtor(struct netmap_adapter *na)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = netmap_generic_getifp(gna);
	struct netmap_adapter *prev_na = gna->prev;

	if (prev_na != NULL) {
		netmap_adapter_put(prev_na);
		if (nm_iszombie(na)) {
			/*
			 * The driver has been removed without releasing
			 * the reference, so we need to do it here.
			 */
			netmap_adapter_put(prev_na);
		}
		nm_prinf("Native netmap adapter for %s restored", prev_na->name);
	}
	NM_RESTORE_NA(ifp, prev_na);
	/*
	 * netmap_detach_common(), which is called after this function,
	 * overrides WNA(ifp) if na->ifp is not NULL.
	 */
	na->ifp = NULL;
	nm_prinf("Emulated netmap adapter for %s destroyed", na->name);
}

int
na_is_generic(struct netmap_adapter *na)
{
	return na->nm_register == generic_netmap_register;
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is slower than native support, but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
int
generic_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter *na;
	struct netmap_generic_adapter *gna;
	int retval;
	u_int num_tx_desc, num_rx_desc;

#ifdef __FreeBSD__
	if (ifp->if_type == IFT_LOOP) {
		nm_prerr("if_loop is not supported by %s", __func__);
		return EINVAL;
	}
#endif

	if (NM_NA_CLASH(ifp)) {
		/* If NA(ifp) is not null but there is no valid netmap
		 * adapter, it means that someone else is using the same
		 * pointer (e.g. ax25_ptr on linux). This happens for
		 * instance when also PF_RING is in use. */
		nm_prerr("Error: netmap adapter hook is busy");
		return EBUSY;
	}

	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

	nm_os_generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); /* ignore errors */
	if (num_tx_desc == 0 || num_rx_desc == 0) {
		nm_prerr("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc);
		return EINVAL;
	}

	gna = nm_os_malloc(sizeof(*gna));
	if (gna == NULL) {
		nm_prerr("no memory on attach, give up");
		return ENOMEM;
	}
	na = (struct netmap_adapter *)gna;
	strlcpy(na->name, ifp->if_xname, sizeof(na->name));
	na->ifp = ifp;
	na->num_tx_desc = num_tx_desc;
	na->num_rx_desc = num_rx_desc;
	na->rx_buf_maxsize = 32768;
	na->nm_register = &generic_netmap_register;
	na->nm_txsync = &generic_netmap_txsync;
	na->nm_rxsync = &generic_netmap_rxsync;
	na->nm_dtor = &generic_netmap_dtor;
	/* When using generic, NAF_NETMAP_ON is set so we force
	 * NAF_SKIP_INTR to use the regular interrupt handler. */
	na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;

	nm_prdis("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
		 ifp->num_tx_queues, ifp->real_num_tx_queues,
		 ifp->tx_queue_len);
	nm_prdis("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
		 ifp->num_rx_queues, ifp->real_num_rx_queues);

	nm_os_generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

	retval = netmap_attach_common(na);
	if (retval) {
		nm_os_free(gna);
		return retval;
	}

	if (NM_NA_VALID(ifp)) {
		gna->prev = NA(ifp); /* save old na */
		netmap_adapter_get(gna->prev);
	}
	NM_ATTACH_NA(ifp, na);

	nm_os_generic_set_features(gna);

	nm_prinf("Emulated adapter for %s created (prev was %s)", na->name,
		 gna->prev ? gna->prev->name : "NULL");

	return retval;
}
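/*
 * Typical call path (a sketch, simplified): when a NIOCREGIF request
 * reaches an interface without native netmap support, the core can
 * fall back to this module roughly as follows:
 *
 *	if (!NM_NA_VALID(ifp)) {
 *		error = generic_netmap_attach(ifp);
 *		if (error)
 *			return error;
 *	}
 *	na = NA(ifp);	now na points to the emulated adapter
 */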