netmap_generic.c revision 267282
/*
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *    We preallocate a block of TX mbufs (roughly as many as
 *    tx descriptors; the number is not critical) to speed up
 *    operation during transmissions. The refcount on most of
 *    these buffers is artificially bumped up so we can recycle
 *    them more easily. Also, the destructor is intercepted
 *    so we use it as an interrupt notification to wake up
 *    processes blocked on a poll().
 *
 *    For each receive ring we allocate one "struct mbq"
 *    (an mbuf tailq plus a spinlock). We intercept packets
 *    (through if_input) on the receive path and put them
 *    in the mbq, from which the netmap receive routines
 *    can grab them.
 *
 * TX:
 *    in the generic_txsync() routine, netmap buffers are copied
 *    (or linked, in the future) to the preallocated mbufs
 *    and pushed to the transmit queue. Some of these mbufs
 *    (those with NS_REPORT, or otherwise every half ring)
 *    have the refcount=1, others have refcount=2.
 *    When the destructor is invoked, we take that as
 *    a notification that all mbufs up to that one in
 *    the specific ring have been completed, and generate
 *    the equivalent of a transmit interrupt.
 *
 * RX:
 *    upon an rxsync request, the mbufs that the rx handler has
 *    queued in the per-ring mbq are dequeued and their payload is
 *    copied into the netmap receive buffers
 *    (see generic_netmap_rxsync() below).
 *
 */

#ifdef __FreeBSD__

#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD: stable/10/sys/dev/netmap/netmap_generic.c 267282 2014-06-09 15:24:45Z luigi $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>        /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#define rtnl_lock()     ND("rtnl_lock called")
#define rtnl_unlock()   ND("rtnl_unlock called")
#define MBUF_TXQ(m)     ((m)->m_pkthdr.flowid)
#define MBUF_RXQ(m)     ((m)->m_pkthdr.flowid)
#define smp_mb()

/*
 * FreeBSD mbuf allocator/deallocator in emulation mode:
 *
 * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE
 * so that the destructor, if invoked, will not free the packet.
 * In principle we should set the destructor only on demand,
 * but since there might be a race we better do it on allocation.
 * As a consequence, we also need to set the destructor, or we
 * would leak buffers.
 */

/*
 * mbuf wrappers
 */

/* mbuf destructor: we also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)      do {            \
        (m)->m_ext.ext_free = (void *)fn;               \
        (m)->m_ext.ext_type = EXT_EXTREF;               \
} while (0)

static void
netmap_default_mbuf_destructor(struct mbuf *m)
{
        /* restore original mbuf */
        m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1;
        m->m_ext.ext_arg1 = NULL;
        m->m_ext.ext_type = EXT_PACKET;
        m->m_ext.ext_free = NULL;
        if (*(m->m_ext.ref_cnt) == 0)
                *(m->m_ext.ref_cnt) = 1;
        uma_zfree(zone_pack, m);
}

static inline struct mbuf *
netmap_get_mbuf(int len)
{
        struct mbuf *m;
        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR | M_NOFREE);
        if (m) {
                m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save
                m->m_ext.ext_free = (void *)netmap_default_mbuf_destructor;
                m->m_ext.ext_type = EXT_EXTREF;
                ND(5, "create m %p refcnt %d", m, *m->m_ext.ref_cnt);
        }
        return m;
}

#define GET_MBUF_REFCNT(m)      ((m)->m_ext.ref_cnt ? *(m)->m_ext.ref_cnt : -1)



#else /* linux */

#include "bsd_glue.h"

#include <linux/rtnetlink.h>    /* rtnl_[un]lock() */
#include <linux/ethtool.h>      /* struct ethtool_ops, get_ringparam */
#include <linux/hrtimer.h>

//#define RATE  /* Enables communication statistics. */

//#define REG_RESET

#endif /* linux */


/* Common headers. */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>



/* ======================== usage stats =========================== */

#ifdef RATE
#define IFRATE(x) x
struct rate_stats {
        unsigned long txpkt;
        unsigned long txsync;
        unsigned long txirq;
        unsigned long rxpkt;
        unsigned long rxirq;
        unsigned long rxsync;
};

struct rate_context {
        unsigned refcount;
        struct timer_list timer;
        struct rate_stats new;
        struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
        printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD 2
static void rate_callback(unsigned long arg)
{
        struct rate_context * ctx = (struct rate_context *)arg;
        struct rate_stats cur = ctx->new;
        int r;

        RATE_PRINTK(txpkt);
        RATE_PRINTK(txsync);
        RATE_PRINTK(txirq);
        RATE_PRINTK(rxpkt);
        RATE_PRINTK(rxsync);
        RATE_PRINTK(rxirq);
        printk("\n");

        ctx->old = cur;
        r = mod_timer(&ctx->timer, jiffies +
                        msecs_to_jiffies(RATE_PERIOD * 1000));
        if (unlikely(r))
                D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE        netmap_buf_size /* Size of the mbufs in the Tx pool. */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only IFCAP_NETMAP instead of NAF_NATIVE_ON to enable the irq.
 */
static void
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
        if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
                return;

        netmap_common_irq(ifp, q, work_done);
}


/* Enable/disable netmap mode for a generic network interface. */
static int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
        struct ifnet *ifp = na->ifp;
        struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
        struct mbuf *m;
        int error;
        int i, r;

        if (!na)
                return EINVAL;

#ifdef REG_RESET
        error = ifp->netdev_ops->ndo_stop(ifp);
        if (error) {
                return error;
        }
#endif /* REG_RESET */

        if (enable) { /* Enable netmap mode. */
                /* Init the mitigation support on all the rx queues. */
                gna->mit = malloc(na->num_rx_rings * sizeof(struct nm_generic_mit),
                                M_DEVBUF, M_NOWAIT | M_ZERO);
                if (!gna->mit) {
                        D("mitigation allocation failed");
                        error = ENOMEM;
                        goto out;
                }
                for (r=0; r<na->num_rx_rings; r++)
                        netmap_mitigation_init(&gna->mit[r], na);

                /* Initialize the rx queue, as generic_rx_handler() can
                 * be called as soon as netmap_catch_rx() returns.
                 */
                for (r=0; r<na->num_rx_rings; r++) {
                        mbq_safe_init(&na->rx_rings[r].rx_queue);
                }

                /*
                 * Preallocate packet buffers for the tx rings.
                 */
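                /* NOTE: the pool pointers are all cleared first, so that if
                 * any allocation below fails, the error path at free_tx_pools
                 * can tell which rings actually own a pool and free only
                 * those.
                 */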
                for (r=0; r<na->num_tx_rings; r++)
                        na->tx_rings[r].tx_pool = NULL;
                for (r=0; r<na->num_tx_rings; r++) {
                        na->tx_rings[r].tx_pool = malloc(na->num_tx_desc * sizeof(struct mbuf *),
                                        M_DEVBUF, M_NOWAIT | M_ZERO);
                        if (!na->tx_rings[r].tx_pool) {
                                D("tx_pool allocation failed");
                                error = ENOMEM;
                                goto free_tx_pools;
                        }
                        for (i=0; i<na->num_tx_desc; i++)
                                na->tx_rings[r].tx_pool[i] = NULL;
                        for (i=0; i<na->num_tx_desc; i++) {
                                m = netmap_get_mbuf(GENERIC_BUF_SIZE);
                                if (!m) {
                                        D("tx_pool[%d] allocation failed", i);
                                        error = ENOMEM;
                                        goto free_tx_pools;
                                }
                                na->tx_rings[r].tx_pool[i] = m;
                        }
                }
                rtnl_lock();
                /* Prepare to intercept incoming traffic. */
                error = netmap_catch_rx(na, 1);
                if (error) {
                        D("netdev_rx_handler_register() failed (%d)", error);
                        goto register_handler;
                }
                ifp->if_capenable |= IFCAP_NETMAP;

                /* Make netmap control the packet steering. */
                netmap_catch_tx(gna, 1);

                rtnl_unlock();

#ifdef RATE
                if (rate_ctx.refcount == 0) {
                        D("setup_timer()");
                        memset(&rate_ctx, 0, sizeof(rate_ctx));
                        setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
                        if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
                                D("Error: mod_timer()");
                        }
                }
                rate_ctx.refcount++;
#endif /* RATE */

        } else if (na->tx_rings[0].tx_pool) {
                /* Disable netmap mode. We enter here only if the previous
                 * generic_netmap_register(na, 1) was successful.
                 * If it was not, na->tx_rings[0].tx_pool was set to NULL by
                 * the error handling code below.
                 */
                rtnl_lock();

                ifp->if_capenable &= ~IFCAP_NETMAP;

                /* Release packet steering control. */
                netmap_catch_tx(gna, 0);

                /* Do not intercept packets on the rx path. */
                netmap_catch_rx(na, 0);

                rtnl_unlock();

                /* Free the mbufs going to the netmap rings */
                for (r=0; r<na->num_rx_rings; r++) {
                        mbq_safe_purge(&na->rx_rings[r].rx_queue);
                        mbq_safe_destroy(&na->rx_rings[r].rx_queue);
                }

                for (r=0; r<na->num_rx_rings; r++)
                        netmap_mitigation_cleanup(&gna->mit[r]);
                free(gna->mit, M_DEVBUF);

                for (r=0; r<na->num_tx_rings; r++) {
                        for (i=0; i<na->num_tx_desc; i++) {
                                m_freem(na->tx_rings[r].tx_pool[i]);
                        }
                        free(na->tx_rings[r].tx_pool, M_DEVBUF);
                }

#ifdef RATE
                if (--rate_ctx.refcount == 0) {
                        D("del_timer()");
                        del_timer(&rate_ctx.timer);
                }
#endif
        }

#ifdef REG_RESET
        error = ifp->netdev_ops->ndo_open(ifp);
        if (error) {
                goto free_tx_pools;
        }
#endif

        return 0;

register_handler:
        rtnl_unlock();
free_tx_pools:
        for (r=0; r<na->num_tx_rings; r++) {
                if (na->tx_rings[r].tx_pool == NULL)
                        continue;
                for (i=0; i<na->num_tx_desc; i++)
                        if (na->tx_rings[r].tx_pool[i])
                                m_freem(na->tx_rings[r].tx_pool[i]);
                free(na->tx_rings[r].tx_pool, M_DEVBUF);
                na->tx_rings[r].tx_pool = NULL;
        }
        for (r=0; r<na->num_rx_rings; r++) {
                netmap_mitigation_cleanup(&gna->mit[r]);
                mbq_safe_destroy(&na->rx_rings[r].rx_queue);
        }
        free(gna->mit, M_DEVBUF);
out:

        return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
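/* Note: this destructor is installed by generic_set_tx_event() on selected
 * mbufs of the tx pool; its invocation is what emulates the "transmit
 * interrupt" described in the comment at the top of this file.
 */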
static void
generic_mbuf_destructor(struct mbuf *m)
{
        netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
#ifdef __FreeBSD__
        if (netmap_verbose)
                RD(5, "Tx irq (%p) queue %d index %d", m, MBUF_TXQ(m), (int)(uintptr_t)m->m_ext.ext_arg1);
        netmap_default_mbuf_destructor(m);
#endif /* __FreeBSD__ */
        IFRATE(rate_ctx.new.txirq++);
}

/* Record completed transmissions and update hwtail.
 *
 * The oldest tx buffer not yet completed is at nr_hwtail + 1,
 * nr_hwcur is the first unsent buffer.
 */
static u_int
generic_netmap_tx_clean(struct netmap_kring *kring)
{
        u_int const lim = kring->nkr_num_slots - 1;
        u_int nm_i = nm_next(kring->nr_hwtail, lim);
        u_int hwcur = kring->nr_hwcur;
        u_int n = 0;
        struct mbuf **tx_pool = kring->tx_pool;

        while (nm_i != hwcur) { /* buffers not completed */
                struct mbuf *m = tx_pool[nm_i];

                if (unlikely(m == NULL)) {
                        /* this is done, try to replenish the entry */
                        tx_pool[nm_i] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
                        if (unlikely(m == NULL)) {
                                D("mbuf allocation failed, XXX error");
                                // XXX how do we proceed ? break ?
                                return -ENOMEM;
                        }
                } else if (GET_MBUF_REFCNT(m) != 1) {
                        break; /* This mbuf is still busy: its refcnt is 2. */
                }
                n++;
                nm_i = nm_next(nm_i, lim);
        }
        kring->nr_hwtail = nm_prev(nm_i, lim);
        ND("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail);

        return n;
}


/*
 * We have pending packets in the driver between nr_hwtail + 1 and hwcur.
 * Compute a position in the middle, to be used to generate
 * a notification.
 */
static inline u_int
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
        u_int n = kring->nkr_num_slots;
        u_int ntc = nm_next(kring->nr_hwtail, n - 1);
        u_int e;

        if (hwcur >= ntc) {
                e = (hwcur + ntc) / 2;
        } else { /* wrap around */
                e = (hwcur + n + ntc) / 2;
                if (e >= n) {
                        e -= n;
                }
        }

        if (unlikely(e >= n)) {
                D("This cannot happen");
                e = 0;
        }

        return e;
}

/*
 * We have pending packets in the driver between nr_hwtail+1 and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race but this is only called within txsync which does
 * a double check.
 */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
        struct mbuf *m;
        u_int e;

        if (nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1) == hwcur) {
                return; /* all buffers are free */
        }
        e = generic_tx_event_middle(kring, hwcur);

        m = kring->tx_pool[e];
        ND(5, "Request Event at %d mbuf %p refcnt %d", e, m, m ? GET_MBUF_REFCNT(m) : -2);
        if (m == NULL) {
                /* This can happen if there is already an event on the netmap
                 * slot 'e': there is nothing to do.
                 */
                return;
        }
        kring->tx_pool[e] = NULL;
        SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

        // XXX wmb() ?
        /* Decrement the refcount and free it if we have the last one. */
        m_freem(m);
        smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
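/* The routine below works in two parts: first the slots between nr_hwcur
 * and rhead are copied into the preallocated mbufs and handed to the
 * driver; then, completed transmissions are reclaimed with
 * generic_netmap_tx_clean(), after possibly requesting a tx event
 * (generic_set_tx_event()) when reclaim is forced or no free slots
 * are left.
 */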
static int
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
        struct ifnet *ifp = na->ifp;
        struct netmap_kring *kring = &na->tx_rings[ring_nr];
        struct netmap_ring *ring = kring->ring;
        u_int nm_i;     /* index into the netmap ring */ // j
        u_int const lim = kring->nkr_num_slots - 1;
        u_int const head = kring->rhead;

        IFRATE(rate_ctx.new.txsync++);

        // TODO: handle the case of mbuf allocation failure

        rmb();

        /*
         * First part: process new packets to send.
         */
        nm_i = kring->nr_hwcur;
        if (nm_i != head) {     /* we have new packets to send */
                while (nm_i != head) {
                        struct netmap_slot *slot = &ring->slot[nm_i];
                        u_int len = slot->len;
                        void *addr = NMB(slot);

                        /* device-specific */
                        struct mbuf *m;
                        int tx_ret;

                        NM_CHECK_ADDR_LEN(addr, len);

                        /* Take an mbuf from the tx pool and copy in the user packet. */
                        m = kring->tx_pool[nm_i];
                        if (unlikely(!m)) {
                                RD(5, "This should never happen");
                                kring->tx_pool[nm_i] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
                                if (unlikely(m == NULL)) {
                                        D("mbuf allocation failed");
                                        break;
                                }
                        }
                        /* XXX we should ask for notifications when NS_REPORT is set,
                         * or roughly every half frame. We can optimize this
                         * by lazily requesting notifications only when a
                         * transmission fails. Probably the best way is to
                         * break on failures and set notifications when
                         * ring->cur == ring->tail || nm_i != cur
                         */
                        tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
                        if (unlikely(tx_ret)) {
                                RD(5, "start_xmit failed: err %d [nm_i %u, head %u, hwtail %u]",
                                                tx_ret, nm_i, head, kring->nr_hwtail);
                                /*
                                 * No room for this mbuf in the device driver.
                                 * Request a notification FOR A PREVIOUS MBUF,
                                 * then call generic_netmap_tx_clean(kring) to do the
                                 * double check and see if we can free more buffers.
                                 * If there is space continue, else break;
                                 * NOTE: the double check is necessary if the problem
                                 * occurs in the txsync call after selrecord().
                                 * Also, we need some way to tell the caller that not
                                 * all buffers were queued onto the device (this was
                                 * not a problem with the native netmap driver where
                                 * space is preallocated). The bridge has a similar
                                 * problem and we solve it there by dropping the
                                 * excess packets.
                                 */
                                generic_set_tx_event(kring, nm_i);
                                if (generic_netmap_tx_clean(kring)) { /* space now available */
                                        continue;
                                } else {
                                        break;
                                }
                        }
                        slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
                        nm_i = nm_next(nm_i, lim);
                        IFRATE(rate_ctx.new.txpkt++);
                }

                /* Update hwcur to the next slot to transmit. */
                kring->nr_hwcur = nm_i; /* not head, we could break early */
        }

        /*
         * Second, reclaim completed buffers
         */
        if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
                /* No more available slots? Set a notification event
                 * on a netmap slot that will be cleaned in the future.
                 * No doublecheck is performed, since txsync() will be
                 * called twice by netmap_poll().
                 */
                generic_set_tx_event(kring, nm_i);
        }
        ND("tx #%d, hwtail = %d", n, kring->nr_hwtail);

        generic_netmap_tx_clean(kring);

        nm_txsync_finalize(kring);

        return 0;
}


/*
 * This handler is registered (through netmap_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen from the network stack.
 * Stolen packets are put in a queue where the
 * generic_netmap_rxsync() callback can extract them.
 */
void
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
        struct netmap_adapter *na = NA(ifp);
        struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
        u_int work_done;
        u_int rr = MBUF_RXQ(m); // receive ring number

        if (rr >= na->num_rx_rings) {
                rr = rr % na->num_rx_rings; // XXX expensive...
        }

        /* limit the size of the queue */
        if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
                m_freem(m);
        } else {
                mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
        }

        if (netmap_generic_mit < 32768) {
                /* no rx mitigation, pass notification up */
                netmap_generic_irq(na->ifp, rr, &work_done);
                IFRATE(rate_ctx.new.rxirq++);
        } else {
                /* same as send combining, filter notification if there is a
                 * pending timer, otherwise pass it up and start a timer.
                 */
                if (likely(netmap_mitigation_active(&gna->mit[rr]))) {
                        /* Record that there is some pending work. */
                        gna->mit[rr].mit_pending = 1;
                } else {
                        netmap_generic_irq(na->ifp, rr, &work_done);
                        IFRATE(rate_ctx.new.rxirq++);
                        netmap_mitigation_start(&gna->mit[rr]);
                }
        }
}

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
        struct netmap_kring *kring = &na->rx_rings[ring_nr];
        struct netmap_ring *ring = kring->ring;
        u_int nm_i;     /* index into the netmap ring */ //j,
        u_int n;
        u_int const lim = kring->nkr_num_slots - 1;
        u_int const head = nm_rxsync_prologue(kring);
        int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

        if (head > lim)
                return netmap_ring_reinit(kring);

        /*
         * First part: import newly received packets.
         */
        if (netmap_no_pendintr || force_update) {
                /* extract buffers from the rx queue, stop at most one
                 * slot before nr_hwcur (stop_i)
                 */
                uint16_t slot_flags = kring->nkr_slot_flags;
                u_int stop_i = nm_prev(kring->nr_hwcur, lim);

                nm_i = kring->nr_hwtail; /* first empty slot in the receive ring */
                for (n = 0; nm_i != stop_i; n++) {
                        int len;
                        void *addr = NMB(&ring->slot[nm_i]);
                        struct mbuf *m;

                        /* we only check the address here on generic rx rings */
                        if (addr == netmap_buffer_base) { /* Bad buffer */
                                return netmap_ring_reinit(kring);
                        }
                        /*
                         * Call the locked version of the function.
                         * XXX Ideally we could grab a batch of mbufs at once
                         * and save some locking overhead.
                         */
                        m = mbq_safe_dequeue(&kring->rx_queue);
                        if (!m) /* no more data */
                                break;
                        len = MBUF_LEN(m);
                        m_copydata(m, 0, len, addr);
                        ring->slot[nm_i].len = len;
                        ring->slot[nm_i].flags = slot_flags;
                        m_freem(m);
                        nm_i = nm_next(nm_i, lim);
                }
                if (n) {
                        kring->nr_hwtail = nm_i;
                        IFRATE(rate_ctx.new.rxpkt += n);
                }
                kring->nr_kflags &= ~NKR_PENDINTR;
        }

        // XXX should we invert the order ?
        /*
         * Second part: skip past packets that userspace has released.
         */
        nm_i = kring->nr_hwcur;
        if (nm_i != head) {
                /* Userspace has released some packets. */
                for (n = 0; nm_i != head; n++) {
                        struct netmap_slot *slot = &ring->slot[nm_i];

                        slot->flags &= ~NS_BUF_CHANGED;
                        nm_i = nm_next(nm_i, lim);
                }
                kring->nr_hwcur = head;
        }
        /* tell userspace that there might be new packets. */
        nm_rxsync_finalize(kring);
        IFRATE(rate_ctx.new.rxsync++);

        return 0;
}

static void
generic_netmap_dtor(struct netmap_adapter *na)
{
        struct ifnet *ifp = na->ifp;
        struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
        struct netmap_adapter *prev_na = gna->prev;

        if (prev_na != NULL) {
                D("Released generic NA %p", gna);
                if_rele(na->ifp);
                netmap_adapter_put(prev_na);
        }
        if (ifp != NULL) {
                WNA(ifp) = prev_na;
                D("Restored native NA %p", prev_na);
                na->ifp = NULL;
        }
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
int
generic_netmap_attach(struct ifnet *ifp)
{
        struct netmap_adapter *na;
        struct netmap_generic_adapter *gna;
        int retval;
        u_int num_tx_desc, num_rx_desc;

        num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

        generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc);
        ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);
        if (num_tx_desc == 0 || num_rx_desc == 0) {
                D("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc);
                return EINVAL;
        }

        gna = malloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (gna == NULL) {
                D("no memory on attach, give up");
                return ENOMEM;
        }
        na = (struct netmap_adapter *)gna;
        na->ifp = ifp;
        na->num_tx_desc = num_tx_desc;
        na->num_rx_desc = num_rx_desc;
        na->nm_register = &generic_netmap_register;
        na->nm_txsync = &generic_netmap_txsync;
        na->nm_rxsync = &generic_netmap_rxsync;
        na->nm_dtor = &generic_netmap_dtor;
        /* when using generic, IFCAP_NETMAP is set so we force
         * NAF_SKIP_INTR to use the regular interrupt handler
         */
        na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;

        ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
                        ifp->num_tx_queues, ifp->real_num_tx_queues,
                        ifp->tx_queue_len);
        ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
                        ifp->num_rx_queues, ifp->real_num_rx_queues);

        generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

        retval = netmap_attach_common(na);
        if (retval) {
                free(gna, M_DEVBUF);
        }

        return retval;
}