netmap_kern.h revision 331722
1/* 2 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved. 3 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27/* 28 * $FreeBSD: stable/11/sys/dev/netmap/netmap_kern.h 331722 2018-03-29 02:50:57Z eadler $ 29 * 30 * The header contains the definitions of constants and function 31 * prototypes used only in kernelspace. 32 */ 33 34#ifndef _NET_NETMAP_KERN_H_ 35#define _NET_NETMAP_KERN_H_ 36 37#if defined(linux) 38 39#if defined(CONFIG_NETMAP_VALE) 40#define WITH_VALE 41#endif 42#if defined(CONFIG_NETMAP_PIPE) 43#define WITH_PIPES 44#endif 45#if defined(CONFIG_NETMAP_MONITOR) 46#define WITH_MONITOR 47#endif 48#if defined(CONFIG_NETMAP_GENERIC) 49#define WITH_GENERIC 50#endif 51#if defined(CONFIG_NETMAP_V1000) 52#define WITH_V1000 53#endif 54 55#else /* not linux */ 56 57#define WITH_VALE // comment out to disable VALE support 58#define WITH_PIPES 59#define WITH_MONITOR 60#define WITH_GENERIC 61 62#endif 63 64#if defined(__FreeBSD__) 65 66#define likely(x) __builtin_expect((long)!!(x), 1L) 67#define unlikely(x) __builtin_expect((long)!!(x), 0L) 68 69#define NM_LOCK_T struct mtx /* low level spinlock, used to protect queues */ 70 71#define NM_MTX_T struct sx /* OS-specific mutex (sleepable) */ 72#define NM_MTX_INIT(m) sx_init(&(m), #m) 73#define NM_MTX_DESTROY(m) sx_destroy(&(m)) 74#define NM_MTX_LOCK(m) sx_xlock(&(m)) 75#define NM_MTX_UNLOCK(m) sx_xunlock(&(m)) 76#define NM_MTX_ASSERT(m) sx_assert(&(m), SA_XLOCKED) 77 78#define NM_SELINFO_T struct nm_selinfo 79#define MBUF_LEN(m) ((m)->m_pkthdr.len) 80#define MBUF_IFP(m) ((m)->m_pkthdr.rcvif) 81#define NM_SEND_UP(ifp, m) ((NA(ifp))->if_input)(ifp, m) 82 83#define NM_ATOMIC_T volatile int // XXX ? 84/* atomic operations */ 85#include <machine/atomic.h> 86#define NM_ATOMIC_TEST_AND_SET(p) (!atomic_cmpset_acq_int((p), 0, 1)) 87#define NM_ATOMIC_CLEAR(p) atomic_store_rel_int((p), 0) 88 89#if __FreeBSD_version >= 1100030 90#define WNA(_ifp) (_ifp)->if_netmap 91#else /* older FreeBSD */ 92#define WNA(_ifp) (_ifp)->if_pspare[0] 93#endif /* older FreeBSD */ 94 95#if __FreeBSD_version >= 1100005 96struct netmap_adapter *netmap_getna(if_t ifp); 97#endif 98 99#if __FreeBSD_version >= 1100027 100#define MBUF_REFCNT(m) ((m)->m_ext.ext_count) 101#define SET_MBUF_REFCNT(m, x) (m)->m_ext.ext_count = x 102#else 103#define MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1) 104#define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ref_cnt) = x 105#endif 106 107MALLOC_DECLARE(M_NETMAP); 108 109struct nm_selinfo { 110 struct selinfo si; 111 struct mtx m; 112}; 113 114void freebsd_selwakeup(struct nm_selinfo *si, int pri); 115 116// XXX linux struct, not used in FreeBSD 117struct net_device_ops { 118}; 119struct ethtool_ops { 120}; 121struct hrtimer { 122}; 123#define NM_BNS_GET(b) 124#define NM_BNS_PUT(b) 125 126#elif defined (linux) 127 128#define NM_LOCK_T safe_spinlock_t // see bsd_glue.h 129#define NM_SELINFO_T wait_queue_head_t 130#define MBUF_LEN(m) ((m)->len) 131#define MBUF_IFP(m) ((m)->dev) 132#define NM_SEND_UP(ifp, m) \ 133 do { \ 134 m->priority = NM_MAGIC_PRIORITY_RX; \ 135 netif_rx(m); \ 136 } while (0) 137 138#define NM_ATOMIC_T volatile long unsigned int 139 140#define NM_MTX_T struct mutex /* OS-specific sleepable lock */ 141#define NM_MTX_INIT(m) mutex_init(&(m)) 142#define NM_MTX_DESTROY(m) do { (void)(m); } while (0) 143#define NM_MTX_LOCK(m) mutex_lock(&(m)) 144#define NM_MTX_UNLOCK(m) mutex_unlock(&(m)) 145#define NM_MTX_ASSERT(m) mutex_is_locked(&(m)) 146 147#ifndef DEV_NETMAP 148#define DEV_NETMAP 149#endif /* DEV_NETMAP */ 150 151#elif defined (__APPLE__) 152 153#warning apple support is incomplete. 154#define likely(x) __builtin_expect(!!(x), 1) 155#define unlikely(x) __builtin_expect(!!(x), 0) 156#define NM_LOCK_T IOLock * 157#define NM_SELINFO_T struct selinfo 158#define MBUF_LEN(m) ((m)->m_pkthdr.len) 159#define NM_SEND_UP(ifp, m) ((ifp)->if_input)(ifp, m) 160 161#else 162 163#error unsupported platform 164 165#endif /* end - platform-specific code */ 166 167#define NMG_LOCK_T NM_MTX_T 168#define NMG_LOCK_INIT() NM_MTX_INIT(netmap_global_lock) 169#define NMG_LOCK_DESTROY() NM_MTX_DESTROY(netmap_global_lock) 170#define NMG_LOCK() NM_MTX_LOCK(netmap_global_lock) 171#define NMG_UNLOCK() NM_MTX_UNLOCK(netmap_global_lock) 172#define NMG_LOCK_ASSERT() NM_MTX_ASSERT(netmap_global_lock) 173 174#define ND(format, ...) 175#define D(format, ...) \ 176 do { \ 177 struct timeval __xxts; \ 178 microtime(&__xxts); \ 179 printf("%03d.%06d [%4d] %-25s " format "\n", \ 180 (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \ 181 __LINE__, __FUNCTION__, ##__VA_ARGS__); \ 182 } while (0) 183 184/* rate limited, lps indicates how many per second */ 185#define RD(lps, format, ...) \ 186 do { \ 187 static int t0, __cnt; \ 188 if (t0 != time_second) { \ 189 t0 = time_second; \ 190 __cnt = 0; \ 191 } \ 192 if (__cnt++ < lps) \ 193 D(format, ##__VA_ARGS__); \ 194 } while (0) 195 196struct netmap_adapter; 197struct nm_bdg_fwd; 198struct nm_bridge; 199struct netmap_priv_d; 200 201const char *nm_dump_buf(char *p, int len, int lim, char *dst); 202 203#include "netmap_mbq.h" 204 205extern NMG_LOCK_T netmap_global_lock; 206 207enum txrx { NR_RX = 0, NR_TX = 1, NR_TXRX }; 208 209static __inline const char* 210nm_txrx2str(enum txrx t) 211{ 212 return (t== NR_RX ? "RX" : "TX"); 213} 214 215static __inline enum txrx 216nm_txrx_swap(enum txrx t) 217{ 218 return (t== NR_RX ? NR_TX : NR_RX); 219} 220 221#define for_rx_tx(t) for ((t) = 0; (t) < NR_TXRX; (t)++) 222 223 224/* 225 * private, kernel view of a ring. Keeps track of the status of 226 * a ring across system calls. 227 * 228 * nr_hwcur index of the next buffer to refill. 229 * It corresponds to ring->head 230 * at the time the system call returns. 231 * 232 * nr_hwtail index of the first buffer owned by the kernel. 233 * On RX, hwcur->hwtail are receive buffers 234 * not yet released. hwcur is advanced following 235 * ring->head, hwtail is advanced on incoming packets, 236 * and a wakeup is generated when hwtail passes ring->cur 237 * On TX, hwcur->rcur have been filled by the sender 238 * but not sent yet to the NIC; rcur->hwtail are available 239 * for new transmissions, and hwtail->hwcur-1 are pending 240 * transmissions not yet acknowledged. 241 * 242 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots. 243 * This is so that, on a reset, buffers owned by userspace are not 244 * modified by the kernel. In particular: 245 * RX rings: the next empty buffer (hwtail + hwofs) coincides with 246 * the next empty buffer as known by the hardware (next_to_check or so). 247 * TX rings: hwcur + hwofs coincides with next_to_send 248 * 249 * For received packets, slot->flags is set to nkr_slot_flags 250 * so we can provide a proper initial value (e.g. set NS_FORWARD 251 * when operating in 'transparent' mode). 252 * 253 * The following fields are used to implement lock-free copy of packets 254 * from input to output ports in VALE switch: 255 * nkr_hwlease buffer after the last one being copied. 256 * A writer in nm_bdg_flush reserves N buffers 257 * from nr_hwlease, advances it, then does the 258 * copy outside the lock. 259 * In RX rings (used for VALE ports), 260 * nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1 261 * In TX rings (used for NIC or host stack ports) 262 * nkr_hwcur <= nkr_hwlease < nkr_hwtail 263 * nkr_leases array of nkr_num_slots where writers can report 264 * completion of their block. NR_NOSLOT (~0) indicates 265 * that the writer has not finished yet 266 * nkr_lease_idx index of next free slot in nr_leases, to be assigned 267 * 268 * The kring is manipulated by txsync/rxsync and generic netmap function. 269 * 270 * Concurrent rxsync or txsync on the same ring are prevented through 271 * by nm_kr_(try)lock() which in turn uses nr_busy. This is all we need 272 * for NIC rings, and for TX rings attached to the host stack. 273 * 274 * RX rings attached to the host stack use an mbq (rx_queue) on both 275 * rxsync_from_host() and netmap_transmit(). The mbq is protected 276 * by its internal lock. 277 * 278 * RX rings attached to the VALE switch are accessed by both senders 279 * and receiver. They are protected through the q_lock on the RX ring. 280 */ 281struct netmap_kring { 282 struct netmap_ring *ring; 283 284 uint32_t nr_hwcur; 285 uint32_t nr_hwtail; 286 287 /* 288 * Copies of values in user rings, so we do not need to look 289 * at the ring (which could be modified). These are set in the 290 * *sync_prologue()/finalize() routines. 291 */ 292 uint32_t rhead; 293 uint32_t rcur; 294 uint32_t rtail; 295 296 uint32_t nr_kflags; /* private driver flags */ 297#define NKR_PENDINTR 0x1 // Pending interrupt. 298#define NKR_EXCLUSIVE 0x2 /* exclusive binding */ 299 uint32_t nkr_num_slots; 300 301 /* 302 * On a NIC reset, the NIC ring indexes may be reset but the 303 * indexes in the netmap rings remain the same. nkr_hwofs 304 * keeps track of the offset between the two. 305 */ 306 int32_t nkr_hwofs; 307 308 uint16_t nkr_slot_flags; /* initial value for flags */ 309 310 /* last_reclaim is opaque marker to help reduce the frequency 311 * of operations such as reclaiming tx buffers. A possible use 312 * is set it to ticks and do the reclaim only once per tick. 313 */ 314 uint64_t last_reclaim; 315 316 317 NM_SELINFO_T si; /* poll/select wait queue */ 318 NM_LOCK_T q_lock; /* protects kring and ring. */ 319 NM_ATOMIC_T nr_busy; /* prevent concurrent syscalls */ 320 321 struct netmap_adapter *na; 322 323 /* The following fields are for VALE switch support */ 324 struct nm_bdg_fwd *nkr_ft; 325 uint32_t *nkr_leases; 326#define NR_NOSLOT ((uint32_t)~0) /* used in nkr_*lease* */ 327 uint32_t nkr_hwlease; 328 uint32_t nkr_lease_idx; 329 330 /* while nkr_stopped is set, no new [tr]xsync operations can 331 * be started on this kring. 332 * This is used by netmap_disable_all_rings() 333 * to find a synchronization point where critical data 334 * structures pointed to by the kring can be added or removed 335 */ 336 volatile int nkr_stopped; 337 338 /* Support for adapters without native netmap support. 339 * On tx rings we preallocate an array of tx buffers 340 * (same size as the netmap ring), on rx rings we 341 * store incoming mbufs in a queue that is drained by 342 * a rxsync. 343 */ 344 struct mbuf **tx_pool; 345 // u_int nr_ntc; /* Emulation of a next-to-clean RX ring pointer. */ 346 struct mbq rx_queue; /* intercepted rx mbufs. */ 347 348 uint32_t users; /* existing bindings for this ring */ 349 350 uint32_t ring_id; /* debugging */ 351 enum txrx tx; /* kind of ring (tx or rx) */ 352 char name[64]; /* diagnostic */ 353 354 /* [tx]sync callback for this kring. 355 * The default nm_kring_create callback (netmap_krings_create) 356 * sets the nm_sync callback of each hardware tx(rx) kring to 357 * the corresponding nm_txsync(nm_rxsync) taken from the 358 * netmap_adapter; moreover, it sets the sync callback 359 * of the host tx(rx) ring to netmap_txsync_to_host 360 * (netmap_rxsync_from_host). 361 * 362 * Overrides: the above configuration is not changed by 363 * any of the nm_krings_create callbacks. 364 */ 365 int (*nm_sync)(struct netmap_kring *kring, int flags); 366 int (*nm_notify)(struct netmap_kring *kring, int flags); 367 368#ifdef WITH_PIPES 369 struct netmap_kring *pipe; /* if this is a pipe ring, 370 * pointer to the other end 371 */ 372 struct netmap_ring *save_ring; /* pointer to hidden rings 373 * (see netmap_pipe.c for details) 374 */ 375#endif /* WITH_PIPES */ 376 377#ifdef WITH_VALE 378 int (*save_notify)(struct netmap_kring *kring, int flags); 379#endif 380 381#ifdef WITH_MONITOR 382 /* array of krings that are monitoring this kring */ 383 struct netmap_kring **monitors; 384 uint32_t max_monitors; /* current size of the monitors array */ 385 uint32_t n_monitors; /* next unused entry in the monitor array */ 386 /* 387 * Monitors work by intercepting the sync and notify callbacks of the 388 * monitored krings. This is implemented by replacing the pointers 389 * above and saving the previous ones in mon_* pointers below 390 */ 391 int (*mon_sync)(struct netmap_kring *kring, int flags); 392 int (*mon_notify)(struct netmap_kring *kring, int flags); 393 394 uint32_t mon_tail; /* last seen slot on rx */ 395 uint32_t mon_pos; /* index of this ring in the monitored ring array */ 396#endif 397} __attribute__((__aligned__(64))); 398 399 400/* return the next index, with wraparound */ 401static inline uint32_t 402nm_next(uint32_t i, uint32_t lim) 403{ 404 return unlikely (i == lim) ? 0 : i + 1; 405} 406 407 408/* return the previous index, with wraparound */ 409static inline uint32_t 410nm_prev(uint32_t i, uint32_t lim) 411{ 412 return unlikely (i == 0) ? lim : i - 1; 413} 414 415 416/* 417 * 418 * Here is the layout for the Rx and Tx rings. 419 420 RxRING TxRING 421 422 +-----------------+ +-----------------+ 423 | | | | 424 |XXX free slot XXX| |XXX free slot XXX| 425 +-----------------+ +-----------------+ 426head->| owned by user |<-hwcur | not sent to nic |<-hwcur 427 | | | yet | 428 +-----------------+ | | 429 cur->| available to | | | 430 | user, not read | +-----------------+ 431 | yet | cur->| (being | 432 | | | prepared) | 433 | | | | 434 +-----------------+ + ------ + 435tail->| |<-hwtail | |<-hwlease 436 | (being | ... | | ... 437 | prepared) | ... | | ... 438 +-----------------+ ... | | ... 439 | |<-hwlease +-----------------+ 440 | | tail->| |<-hwtail 441 | | | | 442 | | | | 443 | | | | 444 +-----------------+ +-----------------+ 445 446 * The cur/tail (user view) and hwcur/hwtail (kernel view) 447 * are used in the normal operation of the card. 448 * 449 * When a ring is the output of a switch port (Rx ring for 450 * a VALE port, Tx ring for the host stack or NIC), slots 451 * are reserved in blocks through 'hwlease' which points 452 * to the next unused slot. 453 * On an Rx ring, hwlease is always after hwtail, 454 * and completions cause hwtail to advance. 455 * On a Tx ring, hwlease is always between cur and hwtail, 456 * and completions cause cur to advance. 457 * 458 * nm_kr_space() returns the maximum number of slots that 459 * can be assigned. 460 * nm_kr_lease() reserves the required number of buffers, 461 * advances nkr_hwlease and also returns an entry in 462 * a circular array where completions should be reported. 463 */ 464 465 466struct netmap_lut { 467 struct lut_entry *lut; 468 uint32_t objtotal; /* max buffer index */ 469 uint32_t objsize; /* buffer size */ 470}; 471 472struct netmap_vp_adapter; // forward 473 474/* 475 * The "struct netmap_adapter" extends the "struct adapter" 476 * (or equivalent) device descriptor. 477 * It contains all base fields needed to support netmap operation. 478 * There are in fact different types of netmap adapters 479 * (native, generic, VALE switch...) so a netmap_adapter is 480 * just the first field in the derived type. 481 */ 482struct netmap_adapter { 483 /* 484 * On linux we do not have a good way to tell if an interface 485 * is netmap-capable. So we always use the following trick: 486 * NA(ifp) points here, and the first entry (which hopefully 487 * always exists and is at least 32 bits) contains a magic 488 * value which we can use to detect that the interface is good. 489 */ 490 uint32_t magic; 491 uint32_t na_flags; /* enabled, and other flags */ 492#define NAF_SKIP_INTR 1 /* use the regular interrupt handler. 493 * useful during initialization 494 */ 495#define NAF_SW_ONLY 2 /* forward packets only to sw adapter */ 496#define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when 497 * forwarding packets coming from this 498 * interface 499 */ 500#define NAF_MEM_OWNER 8 /* the adapter uses its own memory area 501 * that cannot be changed 502 */ 503#define NAF_NATIVE 16 /* the adapter is native. 504 * Virtual ports (non persistent vale ports, 505 * pipes, monitors...) should never use 506 * this flag. 507 */ 508#define NAF_NETMAP_ON 32 /* netmap is active (either native or 509 * emulated). Where possible (e.g. FreeBSD) 510 * IFCAP_NETMAP also mirrors this flag. 511 */ 512#define NAF_HOST_RINGS 64 /* the adapter supports the host rings */ 513#define NAF_FORCE_NATIVE 128 /* the adapter is always NATIVE */ 514#define NAF_BUSY (1U<<31) /* the adapter is used internally and 515 * cannot be registered from userspace 516 */ 517 int active_fds; /* number of user-space descriptors using this 518 interface, which is equal to the number of 519 struct netmap_if objs in the mapped region. */ 520 521 u_int num_rx_rings; /* number of adapter receive rings */ 522 u_int num_tx_rings; /* number of adapter transmit rings */ 523 524 u_int num_tx_desc; /* number of descriptor in each queue */ 525 u_int num_rx_desc; 526 527 /* tx_rings and rx_rings are private but allocated 528 * as a contiguous chunk of memory. Each array has 529 * N+1 entries, for the adapter queues and for the host queue. 530 */ 531 struct netmap_kring *tx_rings; /* array of TX rings. */ 532 struct netmap_kring *rx_rings; /* array of RX rings. */ 533 534 void *tailroom; /* space below the rings array */ 535 /* (used for leases) */ 536 537 538 NM_SELINFO_T si[NR_TXRX]; /* global wait queues */ 539 540 /* count users of the global wait queues */ 541 int si_users[NR_TXRX]; 542 543 void *pdev; /* used to store pci device */ 544 545 /* copy of if_qflush and if_transmit pointers, to intercept 546 * packets from the network stack when netmap is active. 547 */ 548 int (*if_transmit)(struct ifnet *, struct mbuf *); 549 550 /* copy of if_input for netmap_send_up() */ 551 void (*if_input)(struct ifnet *, struct mbuf *); 552 553 /* references to the ifnet and device routines, used by 554 * the generic netmap functions. 555 */ 556 struct ifnet *ifp; /* adapter is ifp->if_softc */ 557 558 /*---- callbacks for this netmap adapter -----*/ 559 /* 560 * nm_dtor() is the cleanup routine called when destroying 561 * the adapter. 562 * Called with NMG_LOCK held. 563 * 564 * nm_register() is called on NIOCREGIF and close() to enter 565 * or exit netmap mode on the NIC 566 * Called with NNG_LOCK held. 567 * 568 * nm_txsync() pushes packets to the underlying hw/switch 569 * 570 * nm_rxsync() collects packets from the underlying hw/switch 571 * 572 * nm_config() returns configuration information from the OS 573 * Called with NMG_LOCK held. 574 * 575 * nm_krings_create() create and init the tx_rings and 576 * rx_rings arrays of kring structures. In particular, 577 * set the nm_sync callbacks for each ring. 578 * There is no need to also allocate the corresponding 579 * netmap_rings, since netmap_mem_rings_create() will always 580 * be called to provide the missing ones. 581 * Called with NNG_LOCK held. 582 * 583 * nm_krings_delete() cleanup and delete the tx_rings and rx_rings 584 * arrays 585 * Called with NMG_LOCK held. 586 * 587 * nm_notify() is used to act after data have become available 588 * (or the stopped state of the ring has changed) 589 * For hw devices this is typically a selwakeup(), 590 * but for NIC/host ports attached to a switch (or vice-versa) 591 * we also need to invoke the 'txsync' code downstream. 592 */ 593 void (*nm_dtor)(struct netmap_adapter *); 594 595 int (*nm_register)(struct netmap_adapter *, int onoff); 596 597 int (*nm_txsync)(struct netmap_kring *kring, int flags); 598 int (*nm_rxsync)(struct netmap_kring *kring, int flags); 599 int (*nm_notify)(struct netmap_kring *kring, int flags); 600#define NAF_FORCE_READ 1 601#define NAF_FORCE_RECLAIM 2 602 /* return configuration information */ 603 int (*nm_config)(struct netmap_adapter *, 604 u_int *txr, u_int *txd, u_int *rxr, u_int *rxd); 605 int (*nm_krings_create)(struct netmap_adapter *); 606 void (*nm_krings_delete)(struct netmap_adapter *); 607#ifdef WITH_VALE 608 /* 609 * nm_bdg_attach() initializes the na_vp field to point 610 * to an adapter that can be attached to a VALE switch. If the 611 * current adapter is already a VALE port, na_vp is simply a cast; 612 * otherwise, na_vp points to a netmap_bwrap_adapter. 613 * If applicable, this callback also initializes na_hostvp, 614 * that can be used to connect the adapter host rings to the 615 * switch. 616 * Called with NMG_LOCK held. 617 * 618 * nm_bdg_ctl() is called on the actual attach/detach to/from 619 * to/from the switch, to perform adapter-specific 620 * initializations 621 * Called with NMG_LOCK held. 622 */ 623 int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *); 624 int (*nm_bdg_ctl)(struct netmap_adapter *, struct nmreq *, int); 625 626 /* adapter used to attach this adapter to a VALE switch (if any) */ 627 struct netmap_vp_adapter *na_vp; 628 /* adapter used to attach the host rings of this adapter 629 * to a VALE switch (if any) */ 630 struct netmap_vp_adapter *na_hostvp; 631#endif 632 633 /* standard refcount to control the lifetime of the adapter 634 * (it should be equal to the lifetime of the corresponding ifp) 635 */ 636 int na_refcount; 637 638 /* memory allocator (opaque) 639 * We also cache a pointer to the lut_entry for translating 640 * buffer addresses, and the total number of buffers. 641 */ 642 struct netmap_mem_d *nm_mem; 643 struct netmap_lut na_lut; 644 645 /* additional information attached to this adapter 646 * by other netmap subsystems. Currently used by 647 * bwrap and LINUX/v1000. 648 */ 649 void *na_private; 650 651 /* array of pipes that have this adapter as a parent */ 652 struct netmap_pipe_adapter **na_pipes; 653 int na_next_pipe; /* next free slot in the array */ 654 int na_max_pipes; /* size of the array */ 655 656 char name[64]; 657}; 658 659static __inline u_int 660nma_get_ndesc(struct netmap_adapter *na, enum txrx t) 661{ 662 return (t == NR_TX ? na->num_tx_desc : na->num_rx_desc); 663} 664 665static __inline void 666nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v) 667{ 668 if (t == NR_TX) 669 na->num_tx_desc = v; 670 else 671 na->num_rx_desc = v; 672} 673 674static __inline u_int 675nma_get_nrings(struct netmap_adapter *na, enum txrx t) 676{ 677 return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings); 678} 679 680static __inline void 681nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v) 682{ 683 if (t == NR_TX) 684 na->num_tx_rings = v; 685 else 686 na->num_rx_rings = v; 687} 688 689static __inline struct netmap_kring* 690NMR(struct netmap_adapter *na, enum txrx t) 691{ 692 return (t == NR_TX ? na->tx_rings : na->rx_rings); 693} 694 695/* 696 * If the NIC is owned by the kernel 697 * (i.e., bridge), neither another bridge nor user can use it; 698 * if the NIC is owned by a user, only users can share it. 699 * Evaluation must be done under NMG_LOCK(). 700 */ 701#define NETMAP_OWNED_BY_KERN(na) ((na)->na_flags & NAF_BUSY) 702#define NETMAP_OWNED_BY_ANY(na) \ 703 (NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0)) 704 705/* 706 * derived netmap adapters for various types of ports 707 */ 708struct netmap_vp_adapter { /* VALE software port */ 709 struct netmap_adapter up; 710 711 /* 712 * Bridge support: 713 * 714 * bdg_port is the port number used in the bridge; 715 * na_bdg points to the bridge this NA is attached to. 716 */ 717 int bdg_port; 718 struct nm_bridge *na_bdg; 719 int retry; 720 721 /* Offset of ethernet header for each packet. */ 722 u_int virt_hdr_len; 723 /* Maximum Frame Size, used in bdg_mismatch_datapath() */ 724 u_int mfs; 725 /* Last source MAC on this port */ 726 uint64_t last_smac; 727}; 728 729 730struct netmap_hw_adapter { /* physical device */ 731 struct netmap_adapter up; 732 733 struct net_device_ops nm_ndo; // XXX linux only 734 struct ethtool_ops nm_eto; // XXX linux only 735 const struct ethtool_ops* save_ethtool; 736 737 int (*nm_hw_register)(struct netmap_adapter *, int onoff); 738}; 739 740#ifdef WITH_GENERIC 741/* Mitigation support. */ 742struct nm_generic_mit { 743 struct hrtimer mit_timer; 744 int mit_pending; 745 int mit_ring_idx; /* index of the ring being mitigated */ 746 struct netmap_adapter *mit_na; /* backpointer */ 747}; 748 749struct netmap_generic_adapter { /* emulated device */ 750 struct netmap_hw_adapter up; 751 752 /* Pointer to a previously used netmap adapter. */ 753 struct netmap_adapter *prev; 754 755 /* generic netmap adapters support: 756 * a net_device_ops struct overrides ndo_select_queue(), 757 * save_if_input saves the if_input hook (FreeBSD), 758 * mit implements rx interrupt mitigation, 759 */ 760 struct net_device_ops generic_ndo; 761 void (*save_if_input)(struct ifnet *, struct mbuf *); 762 763 struct nm_generic_mit *mit; 764#ifdef linux 765 netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *); 766#endif 767}; 768#endif /* WITH_GENERIC */ 769 770static __inline int 771netmap_real_rings(struct netmap_adapter *na, enum txrx t) 772{ 773 return nma_get_nrings(na, t) + !!(na->na_flags & NAF_HOST_RINGS); 774} 775 776#ifdef WITH_VALE 777 778/* 779 * Bridge wrapper for non VALE ports attached to a VALE switch. 780 * 781 * The real device must already have its own netmap adapter (hwna). 782 * The bridge wrapper and the hwna adapter share the same set of 783 * netmap rings and buffers, but they have two separate sets of 784 * krings descriptors, with tx/rx meanings swapped: 785 * 786 * netmap 787 * bwrap krings rings krings hwna 788 * +------+ +------+ +-----+ +------+ +------+ 789 * |tx_rings->| |\ /| |----| |<-tx_rings| 790 * | | +------+ \ / +-----+ +------+ | | 791 * | | X | | 792 * | | / \ | | 793 * | | +------+/ \+-----+ +------+ | | 794 * |rx_rings->| | | |----| |<-rx_rings| 795 * | | +------+ +-----+ +------+ | | 796 * +------+ +------+ 797 * 798 * - packets coming from the bridge go to the brwap rx rings, 799 * which are also the hwna tx rings. The bwrap notify callback 800 * will then complete the hwna tx (see netmap_bwrap_notify). 801 * 802 * - packets coming from the outside go to the hwna rx rings, 803 * which are also the bwrap tx rings. The (overwritten) hwna 804 * notify method will then complete the bridge tx 805 * (see netmap_bwrap_intr_notify). 806 * 807 * The bridge wrapper may optionally connect the hwna 'host' rings 808 * to the bridge. This is done by using a second port in the 809 * bridge and connecting it to the 'host' netmap_vp_adapter 810 * contained in the netmap_bwrap_adapter. The brwap host adapter 811 * cross-links the hwna host rings in the same way as shown above. 812 * 813 * - packets coming from the bridge and directed to the host stack 814 * are handled by the bwrap host notify callback 815 * (see netmap_bwrap_host_notify) 816 * 817 * - packets coming from the host stack are still handled by the 818 * overwritten hwna notify callback (netmap_bwrap_intr_notify), 819 * but are diverted to the host adapter depending on the ring number. 820 * 821 */ 822struct netmap_bwrap_adapter { 823 struct netmap_vp_adapter up; 824 struct netmap_vp_adapter host; /* for host rings */ 825 struct netmap_adapter *hwna; /* the underlying device */ 826 827 /* backup of the hwna memory allocator */ 828 struct netmap_mem_d *save_nmd; 829 830 /* 831 * When we attach a physical interface to the bridge, we 832 * allow the controlling process to terminate, so we need 833 * a place to store the n_detmap_priv_d data structure. 834 * This is only done when physical interfaces 835 * are attached to a bridge. 836 */ 837 struct netmap_priv_d *na_kpriv; 838}; 839int netmap_bwrap_attach(const char *name, struct netmap_adapter *); 840 841 842#endif /* WITH_VALE */ 843 844#ifdef WITH_PIPES 845 846#define NM_MAXPIPES 64 /* max number of pipes per adapter */ 847 848struct netmap_pipe_adapter { 849 struct netmap_adapter up; 850 851 u_int id; /* pipe identifier */ 852 int role; /* either NR_REG_PIPE_MASTER or NR_REG_PIPE_SLAVE */ 853 854 struct netmap_adapter *parent; /* adapter that owns the memory */ 855 struct netmap_pipe_adapter *peer; /* the other end of the pipe */ 856 int peer_ref; /* 1 iff we are holding a ref to the peer */ 857 858 u_int parent_slot; /* index in the parent pipe array */ 859}; 860 861#endif /* WITH_PIPES */ 862 863 864/* return slots reserved to rx clients; used in drivers */ 865static inline uint32_t 866nm_kr_rxspace(struct netmap_kring *k) 867{ 868 int space = k->nr_hwtail - k->nr_hwcur; 869 if (space < 0) 870 space += k->nkr_num_slots; 871 ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail); 872 873 return space; 874} 875 876 877/* True if no space in the tx ring. only valid after txsync_prologue */ 878static inline int 879nm_kr_txempty(struct netmap_kring *kring) 880{ 881 return kring->rcur == kring->nr_hwtail; 882} 883 884 885/* 886 * protect against multiple threads using the same ring. 887 * also check that the ring has not been stopped. 888 * We only care for 0 or !=0 as a return code. 889 */ 890#define NM_KR_BUSY 1 891#define NM_KR_STOPPED 2 892 893 894static __inline void nm_kr_put(struct netmap_kring *kr) 895{ 896 NM_ATOMIC_CLEAR(&kr->nr_busy); 897} 898 899 900static __inline int nm_kr_tryget(struct netmap_kring *kr) 901{ 902 /* check a first time without taking the lock 903 * to avoid starvation for nm_kr_get() 904 */ 905 if (unlikely(kr->nkr_stopped)) { 906 ND("ring %p stopped (%d)", kr, kr->nkr_stopped); 907 return NM_KR_STOPPED; 908 } 909 if (unlikely(NM_ATOMIC_TEST_AND_SET(&kr->nr_busy))) 910 return NM_KR_BUSY; 911 /* check a second time with lock held */ 912 if (unlikely(kr->nkr_stopped)) { 913 ND("ring %p stopped (%d)", kr, kr->nkr_stopped); 914 nm_kr_put(kr); 915 return NM_KR_STOPPED; 916 } 917 return 0; 918} 919 920static __inline void nm_kr_get(struct netmap_kring *kr) 921{ 922 while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy)) 923 tsleep(kr, 0, "NM_KR_GET", 4); 924} 925 926 927/* 928 * The following functions are used by individual drivers to 929 * support netmap operation. 930 * 931 * netmap_attach() initializes a struct netmap_adapter, allocating the 932 * struct netmap_ring's and the struct selinfo. 933 * 934 * netmap_detach() frees the memory allocated by netmap_attach(). 935 * 936 * netmap_transmit() replaces the if_transmit routine of the interface, 937 * and is used to intercept packets coming from the stack. 938 * 939 * netmap_load_map/netmap_reload_map are helper routines to set/reset 940 * the dmamap for a packet buffer 941 * 942 * netmap_reset() is a helper routine to be called in the hw driver 943 * when reinitializing a ring. It should not be called by 944 * virtual ports (vale, pipes, monitor) 945 */ 946int netmap_attach(struct netmap_adapter *); 947void netmap_detach(struct ifnet *); 948int netmap_transmit(struct ifnet *, struct mbuf *); 949struct netmap_slot *netmap_reset(struct netmap_adapter *na, 950 enum txrx tx, u_int n, u_int new_cur); 951int netmap_ring_reinit(struct netmap_kring *); 952 953/* default functions to handle rx/tx interrupts */ 954int netmap_rx_irq(struct ifnet *, u_int, u_int *); 955#define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL) 956void netmap_common_irq(struct ifnet *, u_int, u_int *work_done); 957 958 959#ifdef WITH_VALE 960/* functions used by external modules to interface with VALE */ 961#define netmap_vp_to_ifp(_vp) ((_vp)->up.ifp) 962#define netmap_ifp_to_vp(_ifp) (NA(_ifp)->na_vp) 963#define netmap_ifp_to_host_vp(_ifp) (NA(_ifp)->na_hostvp) 964#define netmap_bdg_idx(_vp) ((_vp)->bdg_port) 965const char *netmap_bdg_name(struct netmap_vp_adapter *); 966#else /* !WITH_VALE */ 967#define netmap_vp_to_ifp(_vp) NULL 968#define netmap_ifp_to_vp(_ifp) NULL 969#define netmap_ifp_to_host_vp(_ifp) NULL 970#define netmap_bdg_idx(_vp) -1 971#define netmap_bdg_name(_vp) NULL 972#endif /* WITH_VALE */ 973 974static inline int 975nm_netmap_on(struct netmap_adapter *na) 976{ 977 return na && na->na_flags & NAF_NETMAP_ON; 978} 979 980static inline int 981nm_native_on(struct netmap_adapter *na) 982{ 983 return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE); 984} 985 986/* set/clear native flags and if_transmit/netdev_ops */ 987static inline void 988nm_set_native_flags(struct netmap_adapter *na) 989{ 990 struct ifnet *ifp = na->ifp; 991 992 na->na_flags |= NAF_NETMAP_ON; 993#ifdef IFCAP_NETMAP /* or FreeBSD ? */ 994 ifp->if_capenable |= IFCAP_NETMAP; 995#endif 996#ifdef __FreeBSD__ 997 na->if_transmit = ifp->if_transmit; 998 ifp->if_transmit = netmap_transmit; 999#else 1000 na->if_transmit = (void *)ifp->netdev_ops; 1001 ifp->netdev_ops = &((struct netmap_hw_adapter *)na)->nm_ndo; 1002 ((struct netmap_hw_adapter *)na)->save_ethtool = ifp->ethtool_ops; 1003 ifp->ethtool_ops = &((struct netmap_hw_adapter*)na)->nm_eto; 1004#endif 1005} 1006 1007 1008static inline void 1009nm_clear_native_flags(struct netmap_adapter *na) 1010{ 1011 struct ifnet *ifp = na->ifp; 1012 1013#ifdef __FreeBSD__ 1014 ifp->if_transmit = na->if_transmit; 1015#else 1016 ifp->netdev_ops = (void *)na->if_transmit; 1017 ifp->ethtool_ops = ((struct netmap_hw_adapter*)na)->save_ethtool; 1018#endif 1019 na->na_flags &= ~NAF_NETMAP_ON; 1020#ifdef IFCAP_NETMAP /* or FreeBSD ? */ 1021 ifp->if_capenable &= ~IFCAP_NETMAP; 1022#endif 1023} 1024 1025 1026/* check/fix address and len in tx rings */ 1027#if 1 /* debug version */ 1028#define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \ 1029 if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) { \ 1030 RD(5, "bad addr/len ring %d slot %d idx %d len %d", \ 1031 kring->ring_id, nm_i, slot->buf_idx, len); \ 1032 if (_l > NETMAP_BUF_SIZE(_na)) \ 1033 _l = NETMAP_BUF_SIZE(_na); \ 1034 } } while (0) 1035#else /* no debug version */ 1036#define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \ 1037 if (_l > NETMAP_BUF_SIZE(_na)) \ 1038 _l = NETMAP_BUF_SIZE(_na); \ 1039 } while (0) 1040#endif 1041 1042 1043/*---------------------------------------------------------------*/ 1044/* 1045 * Support routines used by netmap subsystems 1046 * (native drivers, VALE, generic, pipes, monitors, ...) 1047 */ 1048 1049 1050/* common routine for all functions that create a netmap adapter. It performs 1051 * two main tasks: 1052 * - if the na points to an ifp, mark the ifp as netmap capable 1053 * using na as its native adapter; 1054 * - provide defaults for the setup callbacks and the memory allocator 1055 */ 1056int netmap_attach_common(struct netmap_adapter *); 1057/* common actions to be performed on netmap adapter destruction */ 1058void netmap_detach_common(struct netmap_adapter *); 1059/* fill priv->np_[tr]xq{first,last} using the ringid and flags information 1060 * coming from a struct nmreq 1061 */ 1062int netmap_interp_ringid(struct netmap_priv_d *priv, uint16_t ringid, uint32_t flags); 1063/* update the ring parameters (number and size of tx and rx rings). 1064 * It calls the nm_config callback, if available. 1065 */ 1066int netmap_update_config(struct netmap_adapter *na); 1067/* create and initialize the common fields of the krings array. 1068 * using the information that must be already available in the na. 1069 * tailroom can be used to request the allocation of additional 1070 * tailroom bytes after the krings array. This is used by 1071 * netmap_vp_adapter's (i.e., VALE ports) to make room for 1072 * leasing-related data structures 1073 */ 1074int netmap_krings_create(struct netmap_adapter *na, u_int tailroom); 1075/* deletes the kring array of the adapter. The array must have 1076 * been created using netmap_krings_create 1077 */ 1078void netmap_krings_delete(struct netmap_adapter *na); 1079 1080/* set the stopped/enabled status of ring 1081 * When stopping, they also wait for all current activity on the ring to 1082 * terminate. The status change is then notified using the na nm_notify 1083 * callback. 1084 */ 1085void netmap_set_ring(struct netmap_adapter *, u_int ring_id, enum txrx, int stopped); 1086/* set the stopped/enabled status of all rings of the adapter. */ 1087void netmap_set_all_rings(struct netmap_adapter *, int stopped); 1088/* convenience wrappers for netmap_set_all_rings, used in drivers */ 1089void netmap_disable_all_rings(struct ifnet *); 1090void netmap_enable_all_rings(struct ifnet *); 1091 1092int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na, 1093 uint16_t ringid, uint32_t flags); 1094 1095 1096u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg); 1097int netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na, int create); 1098int netmap_get_hw_na(struct ifnet *ifp, struct netmap_adapter **na); 1099 1100 1101#ifdef WITH_VALE 1102/* 1103 * The following bridge-related functions are used by other 1104 * kernel modules. 1105 * 1106 * VALE only supports unicast or broadcast. The lookup 1107 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports, 1108 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown. 1109 * XXX in practice "unknown" might be handled same as broadcast. 1110 */ 1111typedef u_int (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr, 1112 struct netmap_vp_adapter *); 1113typedef int (*bdg_config_fn_t)(struct nm_ifreq *); 1114typedef void (*bdg_dtor_fn_t)(const struct netmap_vp_adapter *); 1115struct netmap_bdg_ops { 1116 bdg_lookup_fn_t lookup; 1117 bdg_config_fn_t config; 1118 bdg_dtor_fn_t dtor; 1119}; 1120 1121u_int netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring, 1122 struct netmap_vp_adapter *); 1123 1124#define NM_BDG_MAXPORTS 254 /* up to 254 */ 1125#define NM_BDG_BROADCAST NM_BDG_MAXPORTS 1126#define NM_BDG_NOPORT (NM_BDG_MAXPORTS+1) 1127 1128#define NM_NAME "vale" /* prefix for bridge port name */ 1129 1130/* these are redefined in case of no VALE support */ 1131int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, int create); 1132struct nm_bridge *netmap_init_bridges2(u_int); 1133void netmap_uninit_bridges2(struct nm_bridge *, u_int); 1134int netmap_init_bridges(void); 1135void netmap_uninit_bridges(void); 1136int netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops); 1137int netmap_bdg_config(struct nmreq *nmr); 1138 1139#else /* !WITH_VALE */ 1140#define netmap_get_bdg_na(_1, _2, _3) 0 1141#define netmap_init_bridges(_1) 0 1142#define netmap_uninit_bridges() 1143#define netmap_bdg_ctl(_1, _2) EINVAL 1144#endif /* !WITH_VALE */ 1145 1146#ifdef WITH_PIPES 1147/* max number of pipes per device */ 1148#define NM_MAXPIPES 64 /* XXX how many? */ 1149void netmap_pipe_dealloc(struct netmap_adapter *); 1150int netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na, int create); 1151#else /* !WITH_PIPES */ 1152#define NM_MAXPIPES 0 1153#define netmap_pipe_alloc(_1, _2) 0 1154#define netmap_pipe_dealloc(_1) 1155#define netmap_get_pipe_na(nmr, _2, _3) \ 1156 ({ int role__ = (nmr)->nr_flags & NR_REG_MASK; \ 1157 (role__ == NR_REG_PIPE_MASTER || \ 1158 role__ == NR_REG_PIPE_SLAVE) ? EOPNOTSUPP : 0; }) 1159#endif 1160 1161#ifdef WITH_MONITOR 1162int netmap_get_monitor_na(struct nmreq *nmr, struct netmap_adapter **na, int create); 1163void netmap_monitor_stop(struct netmap_adapter *na); 1164#else 1165#define netmap_get_monitor_na(nmr, _2, _3) \ 1166 ((nmr)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0) 1167#endif 1168 1169#ifdef CONFIG_NET_NS 1170struct net *netmap_bns_get(void); 1171void netmap_bns_put(struct net *); 1172void netmap_bns_getbridges(struct nm_bridge **, u_int *); 1173#else 1174#define netmap_bns_get() 1175#define netmap_bns_put(_1) 1176#define netmap_bns_getbridges(b, n) \ 1177 do { *b = nm_bridges; *n = NM_BRIDGES; } while (0) 1178#endif 1179 1180/* Various prototypes */ 1181int netmap_poll(struct cdev *dev, int events, struct thread *td); 1182int netmap_init(void); 1183void netmap_fini(void); 1184int netmap_get_memory(struct netmap_priv_d* p); 1185void netmap_dtor(void *data); 1186int netmap_dtor_locked(struct netmap_priv_d *priv); 1187 1188int netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td); 1189 1190/* netmap_adapter creation/destruction */ 1191 1192// #define NM_DEBUG_PUTGET 1 1193 1194#ifdef NM_DEBUG_PUTGET 1195 1196#define NM_DBG(f) __##f 1197 1198void __netmap_adapter_get(struct netmap_adapter *na); 1199 1200#define netmap_adapter_get(na) \ 1201 do { \ 1202 struct netmap_adapter *__na = na; \ 1203 D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \ 1204 __netmap_adapter_get(__na); \ 1205 } while (0) 1206 1207int __netmap_adapter_put(struct netmap_adapter *na); 1208 1209#define netmap_adapter_put(na) \ 1210 ({ \ 1211 struct netmap_adapter *__na = na; \ 1212 D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \ 1213 __netmap_adapter_put(__na); \ 1214 }) 1215 1216#else /* !NM_DEBUG_PUTGET */ 1217 1218#define NM_DBG(f) f 1219void netmap_adapter_get(struct netmap_adapter *na); 1220int netmap_adapter_put(struct netmap_adapter *na); 1221 1222#endif /* !NM_DEBUG_PUTGET */ 1223 1224 1225/* 1226 * module variables 1227 */ 1228#define NETMAP_BUF_BASE(na) ((na)->na_lut.lut[0].vaddr) 1229#define NETMAP_BUF_SIZE(na) ((na)->na_lut.objsize) 1230extern int netmap_mitigate; // XXX not really used 1231extern int netmap_no_pendintr; 1232extern int netmap_verbose; // XXX debugging 1233enum { /* verbose flags */ 1234 NM_VERB_ON = 1, /* generic verbose */ 1235 NM_VERB_HOST = 0x2, /* verbose host stack */ 1236 NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */ 1237 NM_VERB_TXSYNC = 0x20, 1238 NM_VERB_RXINTR = 0x100, /* verbose on rx/tx intr (driver) */ 1239 NM_VERB_TXINTR = 0x200, 1240 NM_VERB_NIC_RXSYNC = 0x1000, /* verbose on rx/tx intr (driver) */ 1241 NM_VERB_NIC_TXSYNC = 0x2000, 1242}; 1243 1244extern int netmap_txsync_retry; 1245extern int netmap_generic_mit; 1246extern int netmap_generic_ringsize; 1247extern int netmap_generic_rings; 1248extern int netmap_use_count; 1249 1250/* 1251 * NA returns a pointer to the struct netmap adapter from the ifp, 1252 * WNA is used to write it. 1253 */ 1254#define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp)) 1255 1256/* 1257 * Macros to determine if an interface is netmap capable or netmap enabled. 1258 * See the magic field in struct netmap_adapter. 1259 */ 1260#ifdef __FreeBSD__ 1261/* 1262 * on FreeBSD just use if_capabilities and if_capenable. 1263 */ 1264#define NETMAP_CAPABLE(ifp) (NA(ifp) && \ 1265 (ifp)->if_capabilities & IFCAP_NETMAP ) 1266 1267#define NETMAP_SET_CAPABLE(ifp) \ 1268 (ifp)->if_capabilities |= IFCAP_NETMAP 1269 1270#else /* linux */ 1271 1272/* 1273 * on linux: 1274 * we check if NA(ifp) is set and its first element has a related 1275 * magic value. The capenable is within the struct netmap_adapter. 1276 */ 1277#define NETMAP_MAGIC 0x52697a7a 1278 1279#define NETMAP_CAPABLE(ifp) (NA(ifp) && \ 1280 ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC ) 1281 1282#define NETMAP_SET_CAPABLE(ifp) \ 1283 NA(ifp)->magic = ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC 1284 1285#endif /* linux */ 1286 1287#ifdef __FreeBSD__ 1288 1289/* Assigns the device IOMMU domain to an allocator. 1290 * Returns -ENOMEM in case the domain is different */ 1291#define nm_iommu_group_id(dev) (0) 1292 1293/* Callback invoked by the dma machinery after a successful dmamap_load */ 1294static void netmap_dmamap_cb(__unused void *arg, 1295 __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error) 1296{ 1297} 1298 1299/* bus_dmamap_load wrapper: call aforementioned function if map != NULL. 1300 * XXX can we do it without a callback ? 1301 */ 1302static inline void 1303netmap_load_map(struct netmap_adapter *na, 1304 bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 1305{ 1306 if (map) 1307 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na), 1308 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); 1309} 1310 1311static inline void 1312netmap_unload_map(struct netmap_adapter *na, 1313 bus_dma_tag_t tag, bus_dmamap_t map) 1314{ 1315 if (map) 1316 bus_dmamap_unload(tag, map); 1317} 1318 1319/* update the map when a buffer changes. */ 1320static inline void 1321netmap_reload_map(struct netmap_adapter *na, 1322 bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 1323{ 1324 if (map) { 1325 bus_dmamap_unload(tag, map); 1326 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na), 1327 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); 1328 } 1329} 1330 1331#else /* linux */ 1332 1333int nm_iommu_group_id(bus_dma_tag_t dev); 1334#include <linux/dma-mapping.h> 1335 1336static inline void 1337netmap_load_map(struct netmap_adapter *na, 1338 bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 1339{ 1340 if (0 && map) { 1341 *map = dma_map_single(na->pdev, buf, na->na_lut.objsize, 1342 DMA_BIDIRECTIONAL); 1343 } 1344} 1345 1346static inline void 1347netmap_unload_map(struct netmap_adapter *na, 1348 bus_dma_tag_t tag, bus_dmamap_t map) 1349{ 1350 u_int sz = na->na_lut.objsize; 1351 1352 if (*map) { 1353 dma_unmap_single(na->pdev, *map, sz, 1354 DMA_BIDIRECTIONAL); 1355 } 1356} 1357 1358static inline void 1359netmap_reload_map(struct netmap_adapter *na, 1360 bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 1361{ 1362 u_int sz = na->na_lut.objsize; 1363 1364 if (*map) { 1365 dma_unmap_single(na->pdev, *map, sz, 1366 DMA_BIDIRECTIONAL); 1367 } 1368 1369 *map = dma_map_single(na->pdev, buf, sz, 1370 DMA_BIDIRECTIONAL); 1371} 1372 1373/* 1374 * XXX How do we redefine these functions: 1375 * 1376 * on linux we need 1377 * dma_map_single(&pdev->dev, virt_addr, len, direction) 1378 * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction 1379 * The len can be implicit (on netmap it is NETMAP_BUF_SIZE) 1380 * unfortunately the direction is not, so we need to change 1381 * something to have a cross API 1382 */ 1383 1384#if 0 1385 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l]; 1386 /* set time_stamp *before* dma to help avoid a possible race */ 1387 buffer_info->time_stamp = jiffies; 1388 buffer_info->mapped_as_page = false; 1389 buffer_info->length = len; 1390 //buffer_info->next_to_watch = l; 1391 /* reload dma map */ 1392 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1393 NETMAP_BUF_SIZE, DMA_TO_DEVICE); 1394 buffer_info->dma = dma_map_single(&adapter->pdev->dev, 1395 addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE); 1396 1397 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { 1398 D("dma mapping error"); 1399 /* goto dma_error; See e1000_put_txbuf() */ 1400 /* XXX reset */ 1401 } 1402 tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX 1403 1404#endif 1405 1406/* 1407 * The bus_dmamap_sync() can be one of wmb() or rmb() depending on direction. 1408 */ 1409#define bus_dmamap_sync(_a, _b, _c) 1410 1411#endif /* linux */ 1412 1413 1414/* 1415 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n) 1416 */ 1417static inline int 1418netmap_idx_n2k(struct netmap_kring *kr, int idx) 1419{ 1420 int n = kr->nkr_num_slots; 1421 idx += kr->nkr_hwofs; 1422 if (idx < 0) 1423 return idx + n; 1424 else if (idx < n) 1425 return idx; 1426 else 1427 return idx - n; 1428} 1429 1430 1431static inline int 1432netmap_idx_k2n(struct netmap_kring *kr, int idx) 1433{ 1434 int n = kr->nkr_num_slots; 1435 idx -= kr->nkr_hwofs; 1436 if (idx < 0) 1437 return idx + n; 1438 else if (idx < n) 1439 return idx; 1440 else 1441 return idx - n; 1442} 1443 1444 1445/* Entries of the look-up table. */ 1446struct lut_entry { 1447 void *vaddr; /* virtual address. */ 1448 vm_paddr_t paddr; /* physical address. */ 1449}; 1450 1451struct netmap_obj_pool; 1452 1453/* 1454 * NMB return the virtual address of a buffer (buffer 0 on bad index) 1455 * PNMB also fills the physical address 1456 */ 1457static inline void * 1458NMB(struct netmap_adapter *na, struct netmap_slot *slot) 1459{ 1460 struct lut_entry *lut = na->na_lut.lut; 1461 uint32_t i = slot->buf_idx; 1462 return (unlikely(i >= na->na_lut.objtotal)) ? 1463 lut[0].vaddr : lut[i].vaddr; 1464} 1465 1466static inline void * 1467PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp) 1468{ 1469 uint32_t i = slot->buf_idx; 1470 struct lut_entry *lut = na->na_lut.lut; 1471 void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr; 1472 1473 *pp = (i >= na->na_lut.objtotal) ? lut[0].paddr : lut[i].paddr; 1474 return ret; 1475} 1476 1477 1478/* 1479 * Structure associated to each netmap file descriptor. 1480 * It is created on open and left unbound (np_nifp == NULL). 1481 * A successful NIOCREGIF will set np_nifp and the first few fields; 1482 * this is protected by a global lock (NMG_LOCK) due to low contention. 1483 * 1484 * np_refs counts the number of references to the structure: one for the fd, 1485 * plus (on FreeBSD) one for each active mmap which we track ourselves 1486 * (linux automatically tracks them, but FreeBSD does not). 1487 * np_refs is protected by NMG_LOCK. 1488 * 1489 * Read access to the structure is lock free, because ni_nifp once set 1490 * can only go to 0 when nobody is using the entry anymore. Readers 1491 * must check that np_nifp != NULL before using the other fields. 1492 */ 1493struct netmap_priv_d { 1494 struct netmap_if * volatile np_nifp; /* netmap if descriptor. */ 1495 1496 struct netmap_adapter *np_na; 1497 uint32_t np_flags; /* from the ioctl */ 1498 u_int np_qfirst[NR_TXRX], 1499 np_qlast[NR_TXRX]; /* range of tx/rx rings to scan */ 1500 uint16_t np_txpoll; /* XXX and also np_rxpoll ? */ 1501 1502 int np_refs; /* use with NMG_LOCK held */ 1503 1504 /* pointers to the selinfo to be used for selrecord. 1505 * Either the local or the global one depending on the 1506 * number of rings. 1507 */ 1508 NM_SELINFO_T *np_si[NR_TXRX]; 1509 struct thread *np_td; /* kqueue, just debugging */ 1510}; 1511 1512#ifdef WITH_MONITOR 1513 1514struct netmap_monitor_adapter { 1515 struct netmap_adapter up; 1516 1517 struct netmap_priv_d priv; 1518 uint32_t flags; 1519}; 1520 1521#endif /* WITH_MONITOR */ 1522 1523 1524#ifdef WITH_GENERIC 1525/* 1526 * generic netmap emulation for devices that do not have 1527 * native netmap support. 1528 */ 1529int generic_netmap_attach(struct ifnet *ifp); 1530 1531int netmap_catch_rx(struct netmap_generic_adapter *na, int intercept); 1532void generic_rx_handler(struct ifnet *ifp, struct mbuf *m);; 1533void netmap_catch_tx(struct netmap_generic_adapter *na, int enable); 1534int generic_xmit_frame(struct ifnet *ifp, struct mbuf *m, void *addr, u_int len, u_int ring_nr); 1535int generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx); 1536void generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq); 1537static inline struct ifnet* 1538netmap_generic_getifp(struct netmap_generic_adapter *gna) 1539{ 1540 if (gna->prev) 1541 return gna->prev->ifp; 1542 1543 return gna->up.up.ifp; 1544} 1545 1546//#define RATE_GENERIC /* Enables communication statistics for generic. */ 1547#ifdef RATE_GENERIC 1548void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi); 1549#else 1550#define generic_rate(txp, txs, txi, rxp, rxs, rxi) 1551#endif 1552 1553/* 1554 * netmap_mitigation API. This is used by the generic adapter 1555 * to reduce the number of interrupt requests/selwakeup 1556 * to clients on incoming packets. 1557 */ 1558void netmap_mitigation_init(struct nm_generic_mit *mit, int idx, 1559 struct netmap_adapter *na); 1560void netmap_mitigation_start(struct nm_generic_mit *mit); 1561void netmap_mitigation_restart(struct nm_generic_mit *mit); 1562int netmap_mitigation_active(struct nm_generic_mit *mit); 1563void netmap_mitigation_cleanup(struct nm_generic_mit *mit); 1564#endif /* WITH_GENERIC */ 1565 1566 1567 1568/* Shared declarations for the VALE switch. */ 1569 1570/* 1571 * Each transmit queue accumulates a batch of packets into 1572 * a structure before forwarding. Packets to the same 1573 * destination are put in a list using ft_next as a link field. 1574 * ft_frags and ft_next are valid only on the first fragment. 1575 */ 1576struct nm_bdg_fwd { /* forwarding entry for a bridge */ 1577 void *ft_buf; /* netmap or indirect buffer */ 1578 uint8_t ft_frags; /* how many fragments (only on 1st frag) */ 1579 uint8_t _ft_port; /* dst port (unused) */ 1580 uint16_t ft_flags; /* flags, e.g. indirect */ 1581 uint16_t ft_len; /* src fragment len */ 1582 uint16_t ft_next; /* next packet to same destination */ 1583}; 1584 1585/* struct 'virtio_net_hdr' from linux. */ 1586struct nm_vnet_hdr { 1587#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */ 1588#define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */ 1589 uint8_t flags; 1590#define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */ 1591#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */ 1592#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */ 1593#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */ 1594#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */ 1595 uint8_t gso_type; 1596 uint16_t hdr_len; 1597 uint16_t gso_size; 1598 uint16_t csum_start; 1599 uint16_t csum_offset; 1600}; 1601 1602#define WORST_CASE_GSO_HEADER (14+40+60) /* IPv6 + TCP */ 1603 1604/* Private definitions for IPv4, IPv6, UDP and TCP headers. */ 1605 1606struct nm_iphdr { 1607 uint8_t version_ihl; 1608 uint8_t tos; 1609 uint16_t tot_len; 1610 uint16_t id; 1611 uint16_t frag_off; 1612 uint8_t ttl; 1613 uint8_t protocol; 1614 uint16_t check; 1615 uint32_t saddr; 1616 uint32_t daddr; 1617 /*The options start here. */ 1618}; 1619 1620struct nm_tcphdr { 1621 uint16_t source; 1622 uint16_t dest; 1623 uint32_t seq; 1624 uint32_t ack_seq; 1625 uint8_t doff; /* Data offset + Reserved */ 1626 uint8_t flags; 1627 uint16_t window; 1628 uint16_t check; 1629 uint16_t urg_ptr; 1630}; 1631 1632struct nm_udphdr { 1633 uint16_t source; 1634 uint16_t dest; 1635 uint16_t len; 1636 uint16_t check; 1637}; 1638 1639struct nm_ipv6hdr { 1640 uint8_t priority_version; 1641 uint8_t flow_lbl[3]; 1642 1643 uint16_t payload_len; 1644 uint8_t nexthdr; 1645 uint8_t hop_limit; 1646 1647 uint8_t saddr[16]; 1648 uint8_t daddr[16]; 1649}; 1650 1651/* Type used to store a checksum (in host byte order) that hasn't been 1652 * folded yet. 1653 */ 1654#define rawsum_t uint32_t 1655 1656rawsum_t nm_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum); 1657uint16_t nm_csum_ipv4(struct nm_iphdr *iph); 1658void nm_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data, 1659 size_t datalen, uint16_t *check); 1660void nm_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data, 1661 size_t datalen, uint16_t *check); 1662uint16_t nm_csum_fold(rawsum_t cur_sum); 1663 1664void bdg_mismatch_datapath(struct netmap_vp_adapter *na, 1665 struct netmap_vp_adapter *dst_na, 1666 struct nm_bdg_fwd *ft_p, struct netmap_ring *ring, 1667 u_int *j, u_int lim, u_int *howmany); 1668 1669/* persistent virtual port routines */ 1670int nm_vi_persist(const char *, struct ifnet **); 1671void nm_vi_detach(struct ifnet *); 1672void nm_vi_init_index(void); 1673 1674#endif /* _NET_NETMAP_KERN_H_ */ 1675