netmap_kern.h (274459) netmap_kern.h (285349)
1/*
2 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
3 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden (view full) ---

20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27/*
1/*
2 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
3 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden (view full) ---

20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27/*
28 * $FreeBSD: head/sys/dev/netmap/netmap_kern.h 274459 2014-11-13 00:40:34Z luigi $
28 * $FreeBSD: head/sys/dev/netmap/netmap_kern.h 285349 2015-07-10 05:51:36Z luigi $
29 *
30 * The header contains the definitions of constants and function
31 * prototypes used only in kernelspace.
32 */
33
34#ifndef _NET_NETMAP_KERN_H_
35#define _NET_NETMAP_KERN_H_
36
29 *
30 * The header contains the definitions of constants and function
31 * prototypes used only in kernelspace.
32 */
33
34#ifndef _NET_NETMAP_KERN_H_
35#define _NET_NETMAP_KERN_H_
36
37#if defined(linux)
38
39#if defined(CONFIG_NETMAP_VALE)
40#define WITH_VALE
41#endif
42#if defined(CONFIG_NETMAP_PIPE)
43#define WITH_PIPES
44#endif
45#if defined(CONFIG_NETMAP_MONITOR)
46#define WITH_MONITOR
47#endif
48#if defined(CONFIG_NETMAP_GENERIC)
49#define WITH_GENERIC
50#endif
51#if defined(CONFIG_NETMAP_V1000)
52#define WITH_V1000
53#endif
54
55#else /* not linux */
56
37#define WITH_VALE // comment out to disable VALE support
38#define WITH_PIPES
39#define WITH_MONITOR
40#define WITH_GENERIC
41
57#define WITH_VALE // comment out to disable VALE support
58#define WITH_PIPES
59#define WITH_MONITOR
60#define WITH_GENERIC
61
62#endif
63
42#if defined(__FreeBSD__)
43
44#define likely(x) __builtin_expect((long)!!(x), 1L)
45#define unlikely(x) __builtin_expect((long)!!(x), 0L)
46
64#if defined(__FreeBSD__)
65
66#define likely(x) __builtin_expect((long)!!(x), 1L)
67#define unlikely(x) __builtin_expect((long)!!(x), 0L)
68
47#define NM_LOCK_T struct mtx
69#define NM_LOCK_T struct mtx /* low level spinlock, used to protect queues */
48
70
49/* netmap global lock */
50#define NMG_LOCK_T struct sx
51#define NMG_LOCK_INIT() sx_init(&netmap_global_lock, \
52 "netmap global lock")
53#define NMG_LOCK_DESTROY() sx_destroy(&netmap_global_lock)
54#define NMG_LOCK() sx_xlock(&netmap_global_lock)
55#define NMG_UNLOCK() sx_xunlock(&netmap_global_lock)
56#define NMG_LOCK_ASSERT() sx_assert(&netmap_global_lock, SA_XLOCKED)
71#define NM_MTX_T struct sx /* OS-specific mutex (sleepable) */
72#define NM_MTX_INIT(m) sx_init(&(m), #m)
73#define NM_MTX_DESTROY(m) sx_destroy(&(m))
74#define NM_MTX_LOCK(m) sx_xlock(&(m))
75#define NM_MTX_UNLOCK(m) sx_xunlock(&(m))
76#define NM_MTX_ASSERT(m) sx_assert(&(m), SA_XLOCKED)
57
58#define NM_SELINFO_T struct nm_selinfo
59#define MBUF_LEN(m) ((m)->m_pkthdr.len)
60#define MBUF_IFP(m) ((m)->m_pkthdr.rcvif)
61#define NM_SEND_UP(ifp, m) ((NA(ifp))->if_input)(ifp, m)
62
63#define NM_ATOMIC_T volatile int // XXX ?
64/* atomic operations */

--- 32 unchanged lines hidden (view full) ---

97
98// XXX linux struct, not used in FreeBSD
99struct net_device_ops {
100};
101struct ethtool_ops {
102};
103struct hrtimer {
104};
77
78#define NM_SELINFO_T struct nm_selinfo
79#define MBUF_LEN(m) ((m)->m_pkthdr.len)
80#define MBUF_IFP(m) ((m)->m_pkthdr.rcvif)
81#define NM_SEND_UP(ifp, m) ((NA(ifp))->if_input)(ifp, m)
82
83#define NM_ATOMIC_T volatile int // XXX ?
84/* atomic operations */

--- 32 unchanged lines hidden (view full) ---

117
118// XXX linux struct, not used in FreeBSD
119struct net_device_ops {
120};
121struct ethtool_ops {
122};
123struct hrtimer {
124};
125#define NM_BNS_GET(b)
126#define NM_BNS_PUT(b)
105
106#elif defined (linux)
107
108#define NM_LOCK_T safe_spinlock_t // see bsd_glue.h
109#define NM_SELINFO_T wait_queue_head_t
110#define MBUF_LEN(m) ((m)->len)
111#define MBUF_IFP(m) ((m)->dev)
112#define NM_SEND_UP(ifp, m) \
113 do { \
114 m->priority = NM_MAGIC_PRIORITY_RX; \
115 netif_rx(m); \
116 } while (0)
117
118#define NM_ATOMIC_T volatile long unsigned int
119
127
128#elif defined (linux)
129
130#define NM_LOCK_T safe_spinlock_t // see bsd_glue.h
131#define NM_SELINFO_T wait_queue_head_t
132#define MBUF_LEN(m) ((m)->len)
133#define MBUF_IFP(m) ((m)->dev)
134#define NM_SEND_UP(ifp, m) \
135 do { \
136 m->priority = NM_MAGIC_PRIORITY_RX; \
137 netif_rx(m); \
138 } while (0)
139
140#define NM_ATOMIC_T volatile long unsigned int
141
120#define NM_MTX_T struct mutex
121#define NM_MTX_INIT(m, s) do { (void)s; mutex_init(&(m)); } while (0)
122#define NM_MTX_DESTROY(m) do { (void)m; } while (0)
142#define NM_MTX_T struct mutex /* OS-specific sleepable lock */
143#define NM_MTX_INIT(m) mutex_init(&(m))
144#define NM_MTX_DESTROY(m) do { (void)(m); } while (0)
123#define NM_MTX_LOCK(m) mutex_lock(&(m))
124#define NM_MTX_UNLOCK(m) mutex_unlock(&(m))
145#define NM_MTX_LOCK(m) mutex_lock(&(m))
146#define NM_MTX_UNLOCK(m) mutex_unlock(&(m))
125#define NM_MTX_LOCK_ASSERT(m) mutex_is_locked(&(m))
147#define NM_MTX_ASSERT(m) mutex_is_locked(&(m))
126
148
127#define NMG_LOCK_T NM_MTX_T
128#define NMG_LOCK_INIT() NM_MTX_INIT(netmap_global_lock, \
129 "netmap_global_lock")
130#define NMG_LOCK_DESTROY() NM_MTX_DESTROY(netmap_global_lock)
131#define NMG_LOCK() NM_MTX_LOCK(netmap_global_lock)
132#define NMG_UNLOCK() NM_MTX_UNLOCK(netmap_global_lock)
133#define NMG_LOCK_ASSERT() NM_MTX_LOCK_ASSERT(netmap_global_lock)
134
135#ifndef DEV_NETMAP
136#define DEV_NETMAP
137#endif /* DEV_NETMAP */
138
139#elif defined (__APPLE__)
140
141#warning apple support is incomplete.
142#define likely(x) __builtin_expect(!!(x), 1)

--- 4 unchanged lines hidden (view full) ---

147#define NM_SEND_UP(ifp, m) ((ifp)->if_input)(ifp, m)
148
149#else
150
151#error unsupported platform
152
153#endif /* end - platform-specific code */
154
149#ifndef DEV_NETMAP
150#define DEV_NETMAP
151#endif /* DEV_NETMAP */
152
153#elif defined (__APPLE__)
154
155#warning apple support is incomplete.
156#define likely(x) __builtin_expect(!!(x), 1)

--- 4 unchanged lines hidden (view full) ---

161#define NM_SEND_UP(ifp, m) ((ifp)->if_input)(ifp, m)
162
163#else
164
165#error unsupported platform
166
167#endif /* end - platform-specific code */
168
169#define NMG_LOCK_T NM_MTX_T
170#define NMG_LOCK_INIT() NM_MTX_INIT(netmap_global_lock)
171#define NMG_LOCK_DESTROY() NM_MTX_DESTROY(netmap_global_lock)
172#define NMG_LOCK() NM_MTX_LOCK(netmap_global_lock)
173#define NMG_UNLOCK() NM_MTX_UNLOCK(netmap_global_lock)
174#define NMG_LOCK_ASSERT() NM_MTX_ASSERT(netmap_global_lock)
175
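The NMG_* macros above map the single global netmap lock onto the platform mutex defined earlier (an sx lock on FreeBSD, a struct mutex on Linux). A minimal hedged usage sketch; the helper name is hypothetical and not part of this header:

static void
example_global_update(void)
{
        NMG_LOCK();             /* serialize against attach/detach and regif */
        /* ... inspect or modify global netmap state ... */
        NMG_LOCK_ASSERT();      /* sanity check while the lock is held */
        NMG_UNLOCK();
}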
155#define ND(format, ...)
156#define D(format, ...) \
157 do { \
158 struct timeval __xxts; \
159 microtime(&__xxts); \
160 printf("%03d.%06d [%4d] %-25s " format "\n", \
161 (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \
162 __LINE__, __FUNCTION__, ##__VA_ARGS__); \

--- 17 unchanged lines hidden (view full) ---

180struct netmap_priv_d;
181
182const char *nm_dump_buf(char *p, int len, int lim, char *dst);
183
184#include "netmap_mbq.h"
185
186extern NMG_LOCK_T netmap_global_lock;
187
176#define ND(format, ...)
177#define D(format, ...) \
178 do { \
179 struct timeval __xxts; \
180 microtime(&__xxts); \
181 printf("%03d.%06d [%4d] %-25s " format "\n", \
182 (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \
183 __LINE__, __FUNCTION__, ##__VA_ARGS__); \

--- 17 unchanged lines hidden (view full) ---

201struct netmap_priv_d;
202
203const char *nm_dump_buf(char *p, int len, int lim, char *dst);
204
205#include "netmap_mbq.h"
206
207extern NMG_LOCK_T netmap_global_lock;
208
209enum txrx { NR_RX = 0, NR_TX = 1, NR_TXRX };
210
211static __inline const char*
212nm_txrx2str(enum txrx t)
213{
214 return (t== NR_RX ? "RX" : "TX");
215}
216
217static __inline enum txrx
218nm_txrx_swap(enum txrx t)
219{
220 return (t== NR_RX ? NR_TX : NR_RX);
221}
222
223#define for_rx_tx(t) for ((t) = 0; (t) < NR_TXRX; (t)++)
224
225
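The enum txrx helpers introduced above let callers treat the TX and RX sides symmetrically. A short hedged sketch of typical use; kring is assumed to be a valid struct netmap_kring pointer set up by the caller:

        enum txrx t = kring->tx;                /* direction of this kring */
        enum txrx other = nm_txrx_swap(t);      /* the opposite direction */

        D("%s ring %u (peer direction %s)",
            nm_txrx2str(t), kring->ring_id, nm_txrx2str(other));

        /* visit both directions */
        for_rx_tx(t) {
                /* ... per-direction work ... */
        }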
188/*
189 * private, kernel view of a ring. Keeps track of the status of
190 * a ring across system calls.
191 *
192 * nr_hwcur index of the next buffer to refill.
193 * It corresponds to ring->head
194 * at the time the system call returns.
195 *

--- 58 unchanged lines hidden (view full) ---

254 * *sync_prologue()/finalize() routines.
255 */
256 uint32_t rhead;
257 uint32_t rcur;
258 uint32_t rtail;
259
260 uint32_t nr_kflags; /* private driver flags */
261#define NKR_PENDINTR 0x1 // Pending interrupt.
226/*
227 * private, kernel view of a ring. Keeps track of the status of
228 * a ring across system calls.
229 *
230 * nr_hwcur index of the next buffer to refill.
231 * It corresponds to ring->head
232 * at the time the system call returns.
233 *

--- 58 unchanged lines hidden (view full) ---

292 * *sync_prologue()/finalize() routines.
293 */
294 uint32_t rhead;
295 uint32_t rcur;
296 uint32_t rtail;
297
298 uint32_t nr_kflags; /* private driver flags */
299#define NKR_PENDINTR 0x1 // Pending interrupt.
300#define NKR_EXCLUSIVE 0x2 /* exclusive binding */
262 uint32_t nkr_num_slots;
263
264 /*
265 * On a NIC reset, the NIC ring indexes may be reset but the
266 * indexes in the netmap rings remain the same. nkr_hwofs
267 * keeps track of the offset between the two.
268 */
269 int32_t nkr_hwofs;

--- 33 unchanged lines hidden (view full) ---

303 * (same size as the netmap ring), on rx rings we
304 * store incoming mbufs in a queue that is drained by
305 * a rxsync.
306 */
307 struct mbuf **tx_pool;
308 // u_int nr_ntc; /* Emulation of a next-to-clean RX ring pointer. */
309 struct mbq rx_queue; /* intercepted rx mbufs. */
310
301 uint32_t nkr_num_slots;
302
303 /*
304 * On a NIC reset, the NIC ring indexes may be reset but the
305 * indexes in the netmap rings remain the same. nkr_hwofs
306 * keeps track of the offset between the two.
307 */
308 int32_t nkr_hwofs;
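A hedged illustration of the offset translation the comment above describes; the real conversion helpers live in a part of this header hidden from this view, so the function name below is hypothetical:

static inline u_int
example_idx_n2k(struct netmap_kring *kr, u_int nic_idx)
{
        int n = kr->nkr_num_slots;
        int i = nic_idx + kr->nkr_hwofs;        /* apply the offset ... */

        if (i < 0)                              /* ... with wraparound */
                return i + n;
        else if (i < n)
                return i;
        else
                return i - n;
}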

--- 33 unchanged lines hidden (view full) ---

342 * (same size as the netmap ring), on rx rings we
343 * store incoming mbufs in a queue that is drained by
344 * a rxsync.
345 */
346 struct mbuf **tx_pool;
347 // u_int nr_ntc; /* Emulation of a next-to-clean RX ring pointer. */
348 struct mbq rx_queue; /* intercepted rx mbufs. */
349
350 uint32_t users; /* existing bindings for this ring */
351
311 uint32_t ring_id; /* debugging */
352 uint32_t ring_id; /* debugging */
353 enum txrx tx; /* kind of ring (tx or rx) */
312 char name[64]; /* diagnostic */
313
314 /* [tx]sync callback for this kring.
315 * The default nm_kring_create callback (netmap_krings_create)
316 * sets the nm_sync callback of each hardware tx(rx) kring to
317 * the corresponding nm_txsync(nm_rxsync) taken from the
318 * netmap_adapter; moreover, it sets the sync callback
319 * of the host tx(rx) ring to netmap_txsync_to_host
320 * (netmap_rxsync_from_host).
321 *
322 * Overrides: the above configuration is not changed by
323 * any of the nm_krings_create callbacks.
324 */
325 int (*nm_sync)(struct netmap_kring *kring, int flags);
354 char name[64]; /* diagnostic */
355
356 /* [tx]sync callback for this kring.
357 * The default nm_kring_create callback (netmap_krings_create)
358 * sets the nm_sync callback of each hardware tx(rx) kring to
359 * the corresponding nm_txsync(nm_rxsync) taken from the
360 * netmap_adapter; moreover, it sets the sync callback
361 * of the host tx(rx) ring to netmap_txsync_to_host
362 * (netmap_rxsync_from_host).
363 *
364 * Overrides: the above configuration is not changed by
365 * any of the nm_krings_create callbacks.
366 */
367 int (*nm_sync)(struct netmap_kring *kring, int flags);
368 int (*nm_notify)(struct netmap_kring *kring, int flags);
326
327#ifdef WITH_PIPES
328 struct netmap_kring *pipe; /* if this is a pipe ring,
329 * pointer to the other end
330 */
331 struct netmap_ring *save_ring; /* pointer to hidden rings
332 * (see netmap_pipe.c for details)
333 */
334#endif /* WITH_PIPES */
335
369
370#ifdef WITH_PIPES
371 struct netmap_kring *pipe; /* if this is a pipe ring,
372 * pointer to the other end
373 */
374 struct netmap_ring *save_ring; /* pointer to hidden rings
375 * (see netmap_pipe.c for details)
376 */
377#endif /* WITH_PIPES */
378
379#ifdef WITH_VALE
380 int (*save_notify)(struct netmap_kring *kring, int flags);
381#endif
382
336#ifdef WITH_MONITOR
383#ifdef WITH_MONITOR
337 /* pointer to the adapter that is monitoring this kring (if any)
338 */
339 struct netmap_monitor_adapter *monitor;
384 /* array of krings that are monitoring this kring */
385 struct netmap_kring **monitors;
386 uint32_t max_monitors; /* current size of the monitors array */
387 uint32_t n_monitors; /* next unused entry in the monitor array */
340 /*
388 /*
341 * Monitors work by intercepting the txsync and/or rxsync of the
342 * monitored krings. This is implemented by replacing
343 * the nm_sync pointer above and saving the previous
344 * one in save_sync below.
389 * Monitors work by intercepting the sync and notify callbacks of the
390 * monitored krings. This is implemented by replacing the pointers
391 * above and saving the previous ones in mon_* pointers below
345 */
392 */
346 int (*save_sync)(struct netmap_kring *kring, int flags);
393 int (*mon_sync)(struct netmap_kring *kring, int flags);
394 int (*mon_notify)(struct netmap_kring *kring, int flags);
395
396 uint32_t mon_tail; /* last seen slot on rx */
397 uint32_t mon_pos; /* index of this ring in the monitored ring array */
347#endif
348} __attribute__((__aligned__(64)));
349
350
351/* return the next index, with wraparound */
352static inline uint32_t
353nm_next(uint32_t i, uint32_t lim)
354{

--- 54 unchanged lines hidden (view full) ---

409 * nm_kr_space() returns the maximum number of slots that
410 * can be assigned.
411 * nm_kr_lease() reserves the required number of buffers,
412 * advances nkr_hwlease and also returns an entry in
413 * a circular array where completions should be reported.
414 */
415
416
398#endif
399} __attribute__((__aligned__(64)));
400
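A hedged sketch of the interception scheme described in the monitor comment inside the kring structure above (the actual implementation is in netmap_monitor.c); the wrapper name is hypothetical:

static int
example_monitor_sync(struct netmap_kring *kring, int flags)
{
        /* run the original callback that was saved in mon_sync */
        int error = kring->mon_sync(kring, flags);

        if (error == 0) {
                /* ... forward the new slots to each kring listed in
                 * kring->monitors[0 .. n_monitors-1] ...
                 */
        }
        return error;
}

/* installation, done under the proper locks:
 *      kring->mon_sync = kring->nm_sync;
 *      kring->nm_sync  = example_monitor_sync;
 */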
401
402/* return the next index, with wraparound */
403static inline uint32_t
404nm_next(uint32_t i, uint32_t lim)
405{

--- 54 unchanged lines hidden (view full) ---

460 * nm_kr_space() returns the maximum number of slots that
461 * can be assigned.
462 * nm_kr_lease() reserves the required number of buffers,
463 * advances nkr_hwlease and also returns an entry in
464 * a circular array where completions should be reported.
465 */
466
467
468struct netmap_lut {
469 struct lut_entry *lut;
470 uint32_t objtotal; /* max buffer index */
471 uint32_t objsize; /* buffer size */
472};
417
473
418enum txrx { NR_RX = 0, NR_TX = 1 };
419
420struct netmap_vp_adapter; // forward
421
422/*
423 * The "struct netmap_adapter" extends the "struct adapter"
424 * (or equivalent) device descriptor.
425 * It contains all base fields needed to support netmap operation.
426 * There are in fact different types of netmap adapters
427 * (native, generic, VALE switch...) so a netmap_adapter is

--- 12 unchanged lines hidden (view full) ---

440#define NAF_SKIP_INTR 1 /* use the regular interrupt handler.
441 * useful during initialization
442 */
443#define NAF_SW_ONLY 2 /* forward packets only to sw adapter */
444#define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when
445 * forwarding packets coming from this
446 * interface
447 */
474struct netmap_vp_adapter; // forward
475
476/*
477 * The "struct netmap_adapter" extends the "struct adapter"
478 * (or equivalent) device descriptor.
479 * It contains all base fields needed to support netmap operation.
480 * There are in fact different types of netmap adapters
481 * (native, generic, VALE switch...) so a netmap_adapter is

--- 12 unchanged lines hidden (view full) ---

494#define NAF_SKIP_INTR 1 /* use the regular interrupt handler.
495 * useful during initialization
496 */
497#define NAF_SW_ONLY 2 /* forward packets only to sw adapter */
498#define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when
499 * forwarding packets coming from this
500 * interface
501 */
448#define NAF_MEM_OWNER 8 /* the adapter is responsible for the
449 * deallocation of the memory allocator
502#define NAF_MEM_OWNER 8 /* the adapter uses its own memory area
503 * that cannot be changed
450 */
504 */
451#define NAF_NATIVE_ON 16 /* the adapter is native and the attached
452 * interface is in netmap mode.
505#define NAF_NATIVE 16 /* the adapter is native.
453 * Virtual ports (vale, pipe, monitor...)
454 * should never use this flag.
455 */
456#define NAF_NETMAP_ON 32 /* netmap is active (either native or
457 * emulated). Where possible (e.g. FreeBSD)
458 * IFCAP_NETMAP also mirrors this flag.
459 */
460#define NAF_HOST_RINGS 64 /* the adapter supports the host rings */
461#define NAF_FORCE_NATIVE 128 /* the adapter is always NATIVE */
462#define NAF_BUSY (1U<<31) /* the adapter is used internally and
463 * cannot be registered from userspace
464 */
465 int active_fds; /* number of user-space descriptors using this
466 interface, which is equal to the number of
467 struct netmap_if objs in the mapped region. */
468
469 u_int num_rx_rings; /* number of adapter receive rings */
470 u_int num_tx_rings; /* number of adapter transmit rings */
471
506 * Virtual ports (vale, pipe, monitor...)
507 * should never use this flag.
508 */
509#define NAF_NETMAP_ON 32 /* netmap is active (either native or
510 * emulated). Where possible (e.g. FreeBSD)
511 * IFCAP_NETMAP also mirrors this flag.
512 */
513#define NAF_HOST_RINGS 64 /* the adapter supports the host rings */
514#define NAF_FORCE_NATIVE 128 /* the adapter is always NATIVE */
515#define NAF_BUSY (1U<<31) /* the adapter is used internally and
516 * cannot be registered from userspace
517 */
518 int active_fds; /* number of user-space descriptors using this
519 interface, which is equal to the number of
520 struct netmap_if objs in the mapped region. */
521
522 u_int num_rx_rings; /* number of adapter receive rings */
523 u_int num_tx_rings; /* number of adapter transmit rings */
524
472 u_int num_tx_desc; /* number of descriptors in each queue */
525 u_int num_tx_desc; /* number of descriptors in each queue */
473 u_int num_rx_desc;
474
475 /* tx_rings and rx_rings are private but allocated
476 * as a contiguous chunk of memory. Each array has
477 * N+1 entries, for the adapter queues and for the host queue.
478 */
479 struct netmap_kring *tx_rings; /* array of TX rings. */
480 struct netmap_kring *rx_rings; /* array of RX rings. */
481
482 void *tailroom; /* space below the rings array */
483 /* (used for leases) */
484
485
526 u_int num_rx_desc;
527
528 /* tx_rings and rx_rings are private but allocated
529 * as a contiguous chunk of memory. Each array has
530 * N+1 entries, for the adapter queues and for the host queue.
531 */
532 struct netmap_kring *tx_rings; /* array of TX rings. */
533 struct netmap_kring *rx_rings; /* array of RX rings. */
534
535 void *tailroom; /* space below the rings array */
536 /* (used for leases) */
537
538
486 NM_SELINFO_T tx_si, rx_si; /* global wait queues */
539 NM_SELINFO_T si[NR_TXRX]; /* global wait queues */
487
488 /* count users of the global wait queues */
540
541 /* count users of the global wait queues */
489 int tx_si_users, rx_si_users;
542 int si_users[NR_TXRX];
490
491 void *pdev; /* used to store pci device */
492
493 /* copy of if_qflush and if_transmit pointers, to intercept
494 * packets from the network stack when netmap is active.
495 */
496 int (*if_transmit)(struct ifnet *, struct mbuf *);
497

--- 41 unchanged lines hidden (view full) ---

539 * we also need to invoke the 'txsync' code downstream.
540 */
541 void (*nm_dtor)(struct netmap_adapter *);
542
543 int (*nm_register)(struct netmap_adapter *, int onoff);
544
545 int (*nm_txsync)(struct netmap_kring *kring, int flags);
546 int (*nm_rxsync)(struct netmap_kring *kring, int flags);
543
544 void *pdev; /* used to store pci device */
545
546 /* copy of if_qflush and if_transmit pointers, to intercept
547 * packets from the network stack when netmap is active.
548 */
549 int (*if_transmit)(struct ifnet *, struct mbuf *);
550

--- 41 unchanged lines hidden (view full) ---

592 * we also need to invoke the 'txsync' code downstream.
593 */
594 void (*nm_dtor)(struct netmap_adapter *);
595
596 int (*nm_register)(struct netmap_adapter *, int onoff);
597
598 int (*nm_txsync)(struct netmap_kring *kring, int flags);
599 int (*nm_rxsync)(struct netmap_kring *kring, int flags);
600 int (*nm_notify)(struct netmap_kring *kring, int flags);
547#define NAF_FORCE_READ 1
548#define NAF_FORCE_RECLAIM 2
549 /* return configuration information */
550 int (*nm_config)(struct netmap_adapter *,
551 u_int *txr, u_int *txd, u_int *rxr, u_int *rxd);
552 int (*nm_krings_create)(struct netmap_adapter *);
553 void (*nm_krings_delete)(struct netmap_adapter *);
601#define NAF_FORCE_READ 1
602#define NAF_FORCE_RECLAIM 2
603 /* return configuration information */
604 int (*nm_config)(struct netmap_adapter *,
605 u_int *txr, u_int *txd, u_int *rxr, u_int *rxd);
606 int (*nm_krings_create)(struct netmap_adapter *);
607 void (*nm_krings_delete)(struct netmap_adapter *);
554 int (*nm_notify)(struct netmap_adapter *,
555 u_int ring, enum txrx, int flags);
556#define NAF_DISABLE_NOTIFY 8 /* notify that the stopped state of the
557 * ring has changed (kring->nkr_stopped)
558 */
559
560#ifdef WITH_VALE
561 /*
562 * nm_bdg_attach() initializes the na_vp field to point
563 * to an adapter that can be attached to a VALE switch. If the
564 * current adapter is already a VALE port, na_vp is simply a cast;
565 * otherwise, na_vp points to a netmap_bwrap_adapter.
566 * If applicable, this callback also initializes na_hostvp,
567 * that can be used to connect the adapter host rings to the

--- 20 unchanged lines hidden (view full) ---

588 */
589 int na_refcount;
590
591 /* memory allocator (opaque)
592 * We also cache a pointer to the lut_entry for translating
593 * buffer addresses, and the total number of buffers.
594 */
595 struct netmap_mem_d *nm_mem;
608#ifdef WITH_VALE
609 /*
610 * nm_bdg_attach() initializes the na_vp field to point
611 * to an adapter that can be attached to a VALE switch. If the
612 * current adapter is already a VALE port, na_vp is simply a cast;
613 * otherwise, na_vp points to a netmap_bwrap_adapter.
614 * If applicable, this callback also initializes na_hostvp,
615 * that can be used to connect the adapter host rings to the

--- 20 unchanged lines hidden (view full) ---

636 */
637 int na_refcount;
638
639 /* memory allocator (opaque)
640 * We also cache a pointer to the lut_entry for translating
641 * buffer addresses, and the total number of buffers.
642 */
643 struct netmap_mem_d *nm_mem;
596 struct lut_entry *na_lut;
597 uint32_t na_lut_objtotal; /* max buffer index */
598 uint32_t na_lut_objsize; /* buffer size */
644 struct netmap_lut na_lut;
599
600 /* additional information attached to this adapter
601 * by other netmap subsystems. Currently used by
602 * bwrap and LINUX/v1000.
603 */
604 void *na_private;
605
645
646 /* additional information attached to this adapter
647 * by other netmap subsystems. Currently used by
648 * bwrap and LINUX/v1000.
649 */
650 void *na_private;
651
606#ifdef WITH_PIPES
607 /* array of pipes that have this adapter as a parent */
608 struct netmap_pipe_adapter **na_pipes;
609 int na_next_pipe; /* next free slot in the array */
610 int na_max_pipes; /* size of the array */
652 /* array of pipes that have this adapter as a parent */
653 struct netmap_pipe_adapter **na_pipes;
654 int na_next_pipe; /* next free slot in the array */
655 int na_max_pipes; /* size of the array */
611#endif /* WITH_PIPES */
612
613 char name[64];
614};
615
656
657 char name[64];
658};
659
660static __inline u_int
661nma_get_ndesc(struct netmap_adapter *na, enum txrx t)
662{
663 return (t == NR_TX ? na->num_tx_desc : na->num_rx_desc);
664}
616
665
666static __inline void
667nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v)
668{
669 if (t == NR_TX)
670 na->num_tx_desc = v;
671 else
672 na->num_rx_desc = v;
673}
674
675static __inline u_int
676nma_get_nrings(struct netmap_adapter *na, enum txrx t)
677{
678 return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings);
679}
680
681static __inline void
682nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
683{
684 if (t == NR_TX)
685 na->num_tx_rings = v;
686 else
687 na->num_rx_rings = v;
688}
689
690static __inline struct netmap_kring*
691NMR(struct netmap_adapter *na, enum txrx t)
692{
693 return (t == NR_TX ? na->tx_rings : na->rx_rings);
694}
695
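The accessors above parameterize descriptor counts, ring counts and kring arrays by direction. A hedged sketch of a caller walking every hardware kring in both directions; na is assumed to be a valid struct netmap_adapter pointer:

        enum txrx t;
        u_int i;

        for_rx_tx(t) {
                for (i = 0; i < nma_get_nrings(na, t); i++) {
                        struct netmap_kring *kring = &NMR(na, t)[i];

                        D("%s ring %u: %u slots", nm_txrx2str(t),
                            kring->ring_id, kring->nkr_num_slots);
                }
        }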
617/*
618 * If the NIC is owned by the kernel
619 * (i.e., bridge), neither another bridge nor user can use it;
620 * if the NIC is owned by a user, only users can share it.
621 * Evaluation must be done under NMG_LOCK().
622 */
623#define NETMAP_OWNED_BY_KERN(na) ((na)->na_flags & NAF_BUSY)
624#define NETMAP_OWNED_BY_ANY(na) \
625 (NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))
626
696/*
697 * If the NIC is owned by the kernel
698 * (i.e., bridge), neither another bridge nor user can use it;
699 * if the NIC is owned by a user, only users can share it.
700 * Evaluation must be done under NMG_LOCK().
701 */
702#define NETMAP_OWNED_BY_KERN(na) ((na)->na_flags & NAF_BUSY)
703#define NETMAP_OWNED_BY_ANY(na) \
704 (NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))
705
627
628/*
629 * derived netmap adapters for various types of ports
630 */
631struct netmap_vp_adapter { /* VALE software port */
632 struct netmap_adapter up;
633
634 /*
635 * Bridge support:

--- 4 unchanged lines hidden (view full) ---

640 int bdg_port;
641 struct nm_bridge *na_bdg;
642 int retry;
643
644 /* Offset of ethernet header for each packet. */
645 u_int virt_hdr_len;
646 /* Maximum Frame Size, used in bdg_mismatch_datapath() */
647 u_int mfs;
706/*
707 * derived netmap adapters for various types of ports
708 */
709struct netmap_vp_adapter { /* VALE software port */
710 struct netmap_adapter up;
711
712 /*
713 * Bridge support:

--- 4 unchanged lines hidden (view full) ---

718 int bdg_port;
719 struct nm_bridge *na_bdg;
720 int retry;
721
722 /* Offset of ethernet header for each packet. */
723 u_int virt_hdr_len;
724 /* Maximum Frame Size, used in bdg_mismatch_datapath() */
725 u_int mfs;
726 /* Last source MAC on this port */
727 uint64_t last_smac;
648};
649
650
651struct netmap_hw_adapter { /* physical device */
652 struct netmap_adapter up;
653
654 struct net_device_ops nm_ndo; // XXX linux only
655 struct ethtool_ops nm_eto; // XXX linux only

--- 28 unchanged lines hidden (view full) ---

684 struct nm_generic_mit *mit;
685#ifdef linux
686 netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *);
687#endif
688};
689#endif /* WITH_GENERIC */
690
691static __inline int
728};
729
730
731struct netmap_hw_adapter { /* physical device */
732 struct netmap_adapter up;
733
734 struct net_device_ops nm_ndo; // XXX linux only
735 struct ethtool_ops nm_eto; // XXX linux only

--- 28 unchanged lines hidden (view full) ---

764 struct nm_generic_mit *mit;
765#ifdef linux
766 netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *);
767#endif
768};
769#endif /* WITH_GENERIC */
770
771static __inline int
692netmap_real_tx_rings(struct netmap_adapter *na)
772netmap_real_rings(struct netmap_adapter *na, enum txrx t)
693{
773{
694 return na->num_tx_rings + !!(na->na_flags & NAF_HOST_RINGS);
774 return nma_get_nrings(na, t) + !!(na->na_flags & NAF_HOST_RINGS);
695}
696
775}
776
697static __inline int
698netmap_real_rx_rings(struct netmap_adapter *na)
699{
700 return na->num_rx_rings + !!(na->na_flags & NAF_HOST_RINGS);
701}
702
703#ifdef WITH_VALE
704
705/*
706 * Bridge wrapper for non VALE ports attached to a VALE switch.
707 *
708 * The real device must already have its own netmap adapter (hwna).
709 * The bridge wrapper and the hwna adapter share the same set of
710 * netmap rings and buffers, but they have two separate sets of

--- 35 unchanged lines hidden (view full) ---

746 * but are diverted to the host adapter depending on the ring number.
747 *
748 */
749struct netmap_bwrap_adapter {
750 struct netmap_vp_adapter up;
751 struct netmap_vp_adapter host; /* for host rings */
752 struct netmap_adapter *hwna; /* the underlying device */
753
777#ifdef WITH_VALE
778
779/*
780 * Bridge wrapper for non VALE ports attached to a VALE switch.
781 *
782 * The real device must already have its own netmap adapter (hwna).
783 * The bridge wrapper and the hwna adapter share the same set of
784 * netmap rings and buffers, but they have two separate sets of

--- 35 unchanged lines hidden (view full) ---

820 * but are diverted to the host adapter depending on the ring number.
821 *
822 */
823struct netmap_bwrap_adapter {
824 struct netmap_vp_adapter up;
825 struct netmap_vp_adapter host; /* for host rings */
826 struct netmap_adapter *hwna; /* the underlying device */
827
754 /* backup of the hwna notify callback */
755 int (*save_notify)(struct netmap_adapter *,
756 u_int ring, enum txrx, int flags);
757 /* backup of the hwna memory allocator */
758 struct netmap_mem_d *save_nmd;
759
760 /*
761 * When we attach a physical interface to the bridge, we
762 * allow the controlling process to terminate, so we need
763 * a place to store the netmap_priv_d data structure.
764 * This is only done when physical interfaces

--- 77 unchanged lines hidden (view full) ---

842 if (unlikely(kr->nkr_stopped)) {
843 ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
844 nm_kr_put(kr);
845 return NM_KR_STOPPED;
846 }
847 return 0;
848}
849
828 /* backup of the hwna memory allocator */
829 struct netmap_mem_d *save_nmd;
830
831 /*
832 * When we attach a physical interface to the bridge, we
833 * allow the controlling process to terminate, so we need
834 * a place to store the netmap_priv_d data structure.
835 * This is only done when physical interfaces

--- 77 unchanged lines hidden (view full) ---

913 if (unlikely(kr->nkr_stopped)) {
914 ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
915 nm_kr_put(kr);
916 return NM_KR_STOPPED;
917 }
918 return 0;
919}
920
921static __inline void nm_kr_get(struct netmap_kring *kr)
922{
923 while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy))
924 tsleep(kr, 0, "NM_KR_GET", 4);
925}
850
926
927
928
929
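nm_kr_get() above sleeps in short intervals until it owns the kring's nr_busy flag. A hedged sketch of the stop/reconfigure/restart pattern it supports (similar in spirit to netmap_set_ring()); the surrounding code is hypothetical:

        kr->nkr_stopped = 1;    /* make nm_kr_tryget() fail for new entrants */
        nm_kr_get(kr);          /* wait for the current user, if any, to leave */

        /* ... safely drain or reconfigure the ring ... */

        kr->nkr_stopped = 0;
        nm_kr_put(kr);          /* release nr_busy, re-enabling the ring */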
851/*
852 * The following functions are used by individual drivers to
853 * support netmap operation.
854 *
855 * netmap_attach() initializes a struct netmap_adapter, allocating the
856 * struct netmap_ring's and the struct selinfo.
857 *
858 * netmap_detach() frees the memory allocated by netmap_attach().

--- 32 unchanged lines hidden (view full) ---

891#define netmap_vp_to_ifp(_vp) NULL
892#define netmap_ifp_to_vp(_ifp) NULL
893#define netmap_ifp_to_host_vp(_ifp) NULL
894#define netmap_bdg_idx(_vp) -1
895#define netmap_bdg_name(_vp) NULL
896#endif /* WITH_VALE */
897
898static inline int
930/*
931 * The following functions are used by individual drivers to
932 * support netmap operation.
933 *
934 * netmap_attach() initializes a struct netmap_adapter, allocating the
935 * struct netmap_ring's and the struct selinfo.
936 *
937 * netmap_detach() frees the memory allocated by netmap_attach().

--- 32 unchanged lines hidden (view full) ---

970#define netmap_vp_to_ifp(_vp) NULL
971#define netmap_ifp_to_vp(_ifp) NULL
972#define netmap_ifp_to_host_vp(_ifp) NULL
973#define netmap_bdg_idx(_vp) -1
974#define netmap_bdg_name(_vp) NULL
975#endif /* WITH_VALE */
976
977static inline int
899nm_native_on(struct netmap_adapter *na)
978nm_netmap_on(struct netmap_adapter *na)
900{
979{
901 return na && na->na_flags & NAF_NATIVE_ON;
980 return na && na->na_flags & NAF_NETMAP_ON;
902}
903
904static inline int
981}
982
983static inline int
905nm_netmap_on(struct netmap_adapter *na)
984nm_native_on(struct netmap_adapter *na)
906{
985{
907 return na && na->na_flags & NAF_NETMAP_ON;
986 return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE);
908}
909
910/* set/clear native flags and if_transmit/netdev_ops */
911static inline void
912nm_set_native_flags(struct netmap_adapter *na)
913{
914 struct ifnet *ifp = na->ifp;
915
987}
988
989/* set/clear native flags and if_transmit/netdev_ops */
990static inline void
991nm_set_native_flags(struct netmap_adapter *na)
992{
993 struct ifnet *ifp = na->ifp;
994
916 na->na_flags |= (NAF_NATIVE_ON | NAF_NETMAP_ON);
995 na->na_flags |= NAF_NETMAP_ON;
917#ifdef IFCAP_NETMAP /* or FreeBSD ? */
918 ifp->if_capenable |= IFCAP_NETMAP;
919#endif
920#ifdef __FreeBSD__
921 na->if_transmit = ifp->if_transmit;
922 ifp->if_transmit = netmap_transmit;
923#else
924 na->if_transmit = (void *)ifp->netdev_ops;

--- 10 unchanged lines hidden (view full) ---

935 struct ifnet *ifp = na->ifp;
936
937#ifdef __FreeBSD__
938 ifp->if_transmit = na->if_transmit;
939#else
940 ifp->netdev_ops = (void *)na->if_transmit;
941 ifp->ethtool_ops = ((struct netmap_hw_adapter*)na)->save_ethtool;
942#endif
996#ifdef IFCAP_NETMAP /* or FreeBSD ? */
997 ifp->if_capenable |= IFCAP_NETMAP;
998#endif
999#ifdef __FreeBSD__
1000 na->if_transmit = ifp->if_transmit;
1001 ifp->if_transmit = netmap_transmit;
1002#else
1003 na->if_transmit = (void *)ifp->netdev_ops;

--- 10 unchanged lines hidden (view full) ---

1014 struct ifnet *ifp = na->ifp;
1015
1016#ifdef __FreeBSD__
1017 ifp->if_transmit = na->if_transmit;
1018#else
1019 ifp->netdev_ops = (void *)na->if_transmit;
1020 ifp->ethtool_ops = ((struct netmap_hw_adapter*)na)->save_ethtool;
1021#endif
943 na->na_flags &= ~(NAF_NATIVE_ON | NAF_NETMAP_ON);
1022 na->na_flags &= ~NAF_NETMAP_ON;
944#ifdef IFCAP_NETMAP /* or FreeBSD ? */
945 ifp->if_capenable &= ~IFCAP_NETMAP;
946#endif
947}
948
949
1023#ifdef IFCAP_NETMAP /* or FreeBSD ? */
1024 ifp->if_capenable &= ~IFCAP_NETMAP;
1025#endif
1026}
1027
1028
950/*
951 * validates parameters in the ring/kring, returns a value for head
952 * If any error, returns ring_size to force a reinit.
953 */
954uint32_t nm_txsync_prologue(struct netmap_kring *);
955
956
957/*
958 * validates parameters in the ring/kring, returns a value for head,
959 * and the 'reserved' value in the argument.
960 * If any error, returns ring_size lim to force a reinit.
961 */
962uint32_t nm_rxsync_prologue(struct netmap_kring *);
963
964
965/*
966 * update kring and ring at the end of txsync.
967 */
968static inline void
969nm_txsync_finalize(struct netmap_kring *kring)
970{
971 /* update ring tail to what the kernel knows */
972 kring->ring->tail = kring->rtail = kring->nr_hwtail;
973
974 /* note, head/rhead/hwcur might be behind cur/rcur
975 * if no carrier
976 */
977 ND(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
978 kring->name, kring->nr_hwcur, kring->nr_hwtail,
979 kring->rhead, kring->rcur, kring->rtail);
980}
981
982
983/*
984 * update kring and ring at the end of rxsync
985 */
986static inline void
987nm_rxsync_finalize(struct netmap_kring *kring)
988{
989 /* tell userspace that there might be new packets */
990 //struct netmap_ring *ring = kring->ring;
991 ND("head %d cur %d tail %d -> %d", ring->head, ring->cur, ring->tail,
992 kring->nr_hwtail);
993 kring->ring->tail = kring->rtail = kring->nr_hwtail;
994 /* make a copy of the state for next round */
995 kring->rhead = kring->ring->head;
996 kring->rcur = kring->ring->cur;
997}
998
999
1000/* check/fix address and len in tx rings */
1001#if 1 /* debug version */
1002#define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
1003 if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) { \
1004 RD(5, "bad addr/len ring %d slot %d idx %d len %d", \
1005 kring->ring_id, nm_i, slot->buf_idx, len); \
1006 if (_l > NETMAP_BUF_SIZE(_na)) \
1007 _l = NETMAP_BUF_SIZE(_na); \

--- 37 unchanged lines hidden (view full) ---

1045 * netmap_vp_adapter's (i.e., VALE ports) to make room for
1046 * leasing-related data structures
1047 */
1048int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
1049/* deletes the kring array of the adapter. The array must have
1050 * been created using netmap_krings_create
1051 */
1052void netmap_krings_delete(struct netmap_adapter *na);
1029/* check/fix address and len in tx rings */
1030#if 1 /* debug version */
1031#define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
1032 if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) { \
1033 RD(5, "bad addr/len ring %d slot %d idx %d len %d", \
1034 kring->ring_id, nm_i, slot->buf_idx, len); \
1035 if (_l > NETMAP_BUF_SIZE(_na)) \
1036 _l = NETMAP_BUF_SIZE(_na); \

--- 37 unchanged lines hidden (view full) ---

1074 * netmap_vp_adapter's (i.e., VALE ports) to make room for
1075 * leasing-related data structures
1076 */
1077int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
1078/* deletes the kring array of the adapter. The array must have
1079 * been created using netmap_krings_create
1080 */
1081void netmap_krings_delete(struct netmap_adapter *na);
1082int netmap_rxsync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait);
1053
1083
1084
1054/* set the stopped/enabled status of ring
1055 * When stopping, they also wait for all current activity on the ring to
1056 * terminate. The status change is then notified using the na nm_notify
1057 * callback.
1058 */
1085/* set the stopped/enabled status of ring
1086 * When stopping, they also wait for all current activity on the ring to
1087 * terminate. The status change is then notified using the na nm_notify
1088 * callback.
1089 */
1059void netmap_set_txring(struct netmap_adapter *, u_int ring_id, int stopped);
1060void netmap_set_rxring(struct netmap_adapter *, u_int ring_id, int stopped);
1090void netmap_set_ring(struct netmap_adapter *, u_int ring_id, enum txrx, int stopped);
1061/* set the stopped/enabled status of all rings of the adapter. */
1062void netmap_set_all_rings(struct netmap_adapter *, int stopped);
1063/* convenience wrappers for netmap_set_all_rings, used in drivers */
1064void netmap_disable_all_rings(struct ifnet *);
1065void netmap_enable_all_rings(struct ifnet *);
1066
1067int netmap_rxsync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait);
1068
1091/* set the stopped/enabled status of all rings of the adapter. */
1092void netmap_set_all_rings(struct netmap_adapter *, int stopped);
1093/* convenience wrappers for netmap_set_all_rings, used in drivers */
1094void netmap_disable_all_rings(struct ifnet *);
1095void netmap_enable_all_rings(struct ifnet *);
1096
1097int netmap_rxsync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait);
1098
1069struct netmap_if *
1099int
1070netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
1100netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
1071 uint16_t ringid, uint32_t flags, int *err);
1101 uint16_t ringid, uint32_t flags);
1072
1073
1074
1075u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
1076int netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1077int netmap_get_hw_na(struct ifnet *ifp, struct netmap_adapter **na);
1078
1079
1080#ifdef WITH_VALE
1081/*
1082 * The following bridge-related functions are used by other
1083 * kernel modules.
1084 *
1085 * VALE only supports unicast or broadcast. The lookup
1086 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
1087 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown.
1088 * XXX in practice "unknown" might be handled same as broadcast.
1089 */
1090typedef u_int (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr,
1102
1103
1104
1105u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
1106int netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1107int netmap_get_hw_na(struct ifnet *ifp, struct netmap_adapter **na);
1108
1109
1110#ifdef WITH_VALE
1111/*
1112 * The following bridge-related functions are used by other
1113 * kernel modules.
1114 *
1115 * VALE only supports unicast or broadcast. The lookup
1116 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
1117 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown.
1118 * XXX in practice "unknown" might be handled same as broadcast.
1119 */
1120typedef u_int (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr,
1091 const struct netmap_vp_adapter *);
1121 struct netmap_vp_adapter *);
1092typedef int (*bdg_config_fn_t)(struct nm_ifreq *);
1093typedef void (*bdg_dtor_fn_t)(const struct netmap_vp_adapter *);
1094struct netmap_bdg_ops {
1095 bdg_lookup_fn_t lookup;
1096 bdg_config_fn_t config;
1097 bdg_dtor_fn_t dtor;
1098};
1099
1100u_int netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
1122typedef int (*bdg_config_fn_t)(struct nm_ifreq *);
1123typedef void (*bdg_dtor_fn_t)(const struct netmap_vp_adapter *);
1124struct netmap_bdg_ops {
1125 bdg_lookup_fn_t lookup;
1126 bdg_config_fn_t config;
1127 bdg_dtor_fn_t dtor;
1128};
1129
1130u_int netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
1101 const struct netmap_vp_adapter *);
1131 struct netmap_vp_adapter *);
1102
1103#define NM_BDG_MAXPORTS 254 /* up to 254 */
1104#define NM_BDG_BROADCAST NM_BDG_MAXPORTS
1105#define NM_BDG_NOPORT (NM_BDG_MAXPORTS+1)
1106
1107#define NM_NAME "vale" /* prefix for bridge port name */
1108
1109/* these are redefined in case of no VALE support */
1110int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1132
1133#define NM_BDG_MAXPORTS 254 /* up to 254 */
1134#define NM_BDG_BROADCAST NM_BDG_MAXPORTS
1135#define NM_BDG_NOPORT (NM_BDG_MAXPORTS+1)
1136
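The lookup callback type above returns a destination port using the conventions just described. A minimal hedged sketch of a custom lookup function that simply floods every frame; the name is hypothetical and such a function would be installed through netmap_bdg_ctl() via a struct netmap_bdg_ops:

static u_int
example_bdg_flood(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
    struct netmap_vp_adapter *vpna)
{
        (void)ft; (void)dst_ring; (void)vpna;   /* unused in this sketch */
        return NM_BDG_BROADCAST;        /* deliver to all ports but the source */
}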
1137#define NM_NAME "vale" /* prefix for bridge port name */
1138
1139/* these are redefined in case of no VALE support */
1140int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1111void netmap_init_bridges(void);
1141struct nm_bridge *netmap_init_bridges2(u_int);
1142void netmap_uninit_bridges2(struct nm_bridge *, u_int);
1143int netmap_init_bridges(void);
1144void netmap_uninit_bridges(void);
1112int netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops);
1113int netmap_bdg_config(struct nmreq *nmr);
1114
1115#else /* !WITH_VALE */
1116#define netmap_get_bdg_na(_1, _2, _3) 0
1145int netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops);
1146int netmap_bdg_config(struct nmreq *nmr);
1147
1148#else /* !WITH_VALE */
1149#define netmap_get_bdg_na(_1, _2, _3) 0
1117#define netmap_init_bridges(_1)
1150#define netmap_init_bridges(_1) 0
1151#define netmap_uninit_bridges()
1118#define netmap_bdg_ctl(_1, _2) EINVAL
1119#endif /* !WITH_VALE */
1120
1121#ifdef WITH_PIPES
1122/* max number of pipes per device */
1123#define NM_MAXPIPES 64 /* XXX how many? */
1152#define netmap_bdg_ctl(_1, _2) EINVAL
1153#endif /* !WITH_VALE */
1154
1155#ifdef WITH_PIPES
1156/* max number of pipes per device */
1157#define NM_MAXPIPES 64 /* XXX how many? */
1124/* in case of no error, returns the actual number of pipes in nmr->nr_arg1 */
1125int netmap_pipe_alloc(struct netmap_adapter *, struct nmreq *nmr);
1126void netmap_pipe_dealloc(struct netmap_adapter *);
1127int netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1128#else /* !WITH_PIPES */
1129#define NM_MAXPIPES 0
1158void netmap_pipe_dealloc(struct netmap_adapter *);
1159int netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1160#else /* !WITH_PIPES */
1161#define NM_MAXPIPES 0
1130#define netmap_pipe_alloc(_1, _2) EOPNOTSUPP
1162#define netmap_pipe_alloc(_1, _2) 0
1131#define netmap_pipe_dealloc(_1)
1163#define netmap_pipe_dealloc(_1)
1132#define netmap_get_pipe_na(_1, _2, _3) 0
1164#define netmap_get_pipe_na(nmr, _2, _3) \
1165 ({ int role__ = (nmr)->nr_flags & NR_REG_MASK; \
1166 (role__ == NR_REG_PIPE_MASTER || \
1167 role__ == NR_REG_PIPE_SLAVE) ? EOPNOTSUPP : 0; })
1133#endif
1134
1135#ifdef WITH_MONITOR
1136int netmap_get_monitor_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1168#endif
1169
1170#ifdef WITH_MONITOR
1171int netmap_get_monitor_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
1172void netmap_monitor_stop(struct netmap_adapter *na);
1137#else
1173#else
1138#define netmap_get_monitor_na(_1, _2, _3) 0
1174#define netmap_get_monitor_na(nmr, _2, _3) \
1175 ((nmr)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0)
1139#endif
1140
1176#endif
1177
1178#ifdef CONFIG_NET_NS
1179struct net *netmap_bns_get(void);
1180void netmap_bns_put(struct net *);
1181void netmap_bns_getbridges(struct nm_bridge **, u_int *);
1182#else
1183#define netmap_bns_get()
1184#define netmap_bns_put(_1)
1185#define netmap_bns_getbridges(b, n) \
1186 do { *b = nm_bridges; *n = NM_BRIDGES; } while (0)
1187#endif
1188
1141/* Various prototypes */
1142int netmap_poll(struct cdev *dev, int events, struct thread *td);
1143int netmap_init(void);
1144void netmap_fini(void);
1145int netmap_get_memory(struct netmap_priv_d* p);
1146void netmap_dtor(void *data);
1147int netmap_dtor_locked(struct netmap_priv_d *priv);
1148

--- 32 unchanged lines hidden (view full) ---

1181int netmap_adapter_put(struct netmap_adapter *na);
1182
1183#endif /* !NM_DEBUG_PUTGET */
1184
1185
1186/*
1187 * module variables
1188 */
1189/* Various prototypes */
1190int netmap_poll(struct cdev *dev, int events, struct thread *td);
1191int netmap_init(void);
1192void netmap_fini(void);
1193int netmap_get_memory(struct netmap_priv_d* p);
1194void netmap_dtor(void *data);
1195int netmap_dtor_locked(struct netmap_priv_d *priv);
1196

--- 32 unchanged lines hidden (view full) ---

1229int netmap_adapter_put(struct netmap_adapter *na);
1230
1231#endif /* !NM_DEBUG_PUTGET */
1232
1233
1234/*
1235 * module variables
1236 */
1189#define NETMAP_BUF_BASE(na) ((na)->na_lut[0].vaddr)
1190#define NETMAP_BUF_SIZE(na) ((na)->na_lut_objsize)
1237#define NETMAP_BUF_BASE(na) ((na)->na_lut.lut[0].vaddr)
1238#define NETMAP_BUF_SIZE(na) ((na)->na_lut.objsize)
1191extern int netmap_mitigate; // XXX not really used
1192extern int netmap_no_pendintr;
1193extern int netmap_verbose; // XXX debugging
1194enum { /* verbose flags */
1195 NM_VERB_ON = 1, /* generic verbose */
1196 NM_VERB_HOST = 0x2, /* verbose host stack */
1197 NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */
1198 NM_VERB_TXSYNC = 0x20,

--- 87 unchanged lines hidden (view full) ---

1286 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1287 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
1288 }
1289}
1290
1291#else /* linux */
1292
1293int nm_iommu_group_id(bus_dma_tag_t dev);
1239extern int netmap_mitigate; // XXX not really used
1240extern int netmap_no_pendintr;
1241extern int netmap_verbose; // XXX debugging
1242enum { /* verbose flags */
1243 NM_VERB_ON = 1, /* generic verbose */
1244 NM_VERB_HOST = 0x2, /* verbose host stack */
1245 NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */
1246 NM_VERB_TXSYNC = 0x20,

--- 87 unchanged lines hidden (view full) ---

1334 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1335 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
1336 }
1337}
1338
1339#else /* linux */
1340
1341int nm_iommu_group_id(bus_dma_tag_t dev);
1294extern size_t netmap_mem_get_bufsize(struct netmap_mem_d *);
1295#include <linux/dma-mapping.h>
1296
1297static inline void
1298netmap_load_map(struct netmap_adapter *na,
1299 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1300{
1342#include <linux/dma-mapping.h>
1343
1344static inline void
1345netmap_load_map(struct netmap_adapter *na,
1346 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1347{
1301 if (map) {
1302 *map = dma_map_single(na->pdev, buf, netmap_mem_get_bufsize(na->nm_mem),
1348 if (0 && map) {
1349 *map = dma_map_single(na->pdev, buf, na->na_lut.objsize,
1303 DMA_BIDIRECTIONAL);
1304 }
1305}
1306
1307static inline void
1308netmap_unload_map(struct netmap_adapter *na,
1309 bus_dma_tag_t tag, bus_dmamap_t map)
1310{
1350 DMA_BIDIRECTIONAL);
1351 }
1352}
1353
1354static inline void
1355netmap_unload_map(struct netmap_adapter *na,
1356 bus_dma_tag_t tag, bus_dmamap_t map)
1357{
1311 u_int sz = netmap_mem_get_bufsize(na->nm_mem);
1358 u_int sz = na->na_lut.objsize;
1312
1313 if (*map) {
1314 dma_unmap_single(na->pdev, *map, sz,
1315 DMA_BIDIRECTIONAL);
1316 }
1317}
1318
1319static inline void
1320netmap_reload_map(struct netmap_adapter *na,
1321 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1322{
1359
1360 if (*map) {
1361 dma_unmap_single(na->pdev, *map, sz,
1362 DMA_BIDIRECTIONAL);
1363 }
1364}
1365
1366static inline void
1367netmap_reload_map(struct netmap_adapter *na,
1368 bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1369{
1323 u_int sz = netmap_mem_get_bufsize(na->nm_mem);
1370 u_int sz = na->na_lut.objsize;
1324
1325 if (*map) {
1326 dma_unmap_single(na->pdev, *map, sz,
1327 DMA_BIDIRECTIONAL);
1328 }
1329
1330 *map = dma_map_single(na->pdev, buf, sz,
1331 DMA_BIDIRECTIONAL);

--- 81 unchanged lines hidden (view full) ---

1413
1414/*
1415 * NMB returns the virtual address of a buffer (buffer 0 on bad index)
1416 * PNMB also fills the physical address
1417 */
1418static inline void *
1419NMB(struct netmap_adapter *na, struct netmap_slot *slot)
1420{
1371
1372 if (*map) {
1373 dma_unmap_single(na->pdev, *map, sz,
1374 DMA_BIDIRECTIONAL);
1375 }
1376
1377 *map = dma_map_single(na->pdev, buf, sz,
1378 DMA_BIDIRECTIONAL);

--- 81 unchanged lines hidden (view full) ---

1460
1461/*
1462 * NMB returns the virtual address of a buffer (buffer 0 on bad index)
1463 * PNMB also fills the physical address
1464 */
1465static inline void *
1466NMB(struct netmap_adapter *na, struct netmap_slot *slot)
1467{
1421 struct lut_entry *lut = na->na_lut;
1468 struct lut_entry *lut = na->na_lut.lut;
1422 uint32_t i = slot->buf_idx;
1469 uint32_t i = slot->buf_idx;
1423 return (unlikely(i >= na->na_lut_objtotal)) ?
1470 return (unlikely(i >= na->na_lut.objtotal)) ?
1424 lut[0].vaddr : lut[i].vaddr;
1425}
1426
1427static inline void *
1428PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp)
1429{
1430 uint32_t i = slot->buf_idx;
1471 lut[0].vaddr : lut[i].vaddr;
1472}
1473
1474static inline void *
1475PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp)
1476{
1477 uint32_t i = slot->buf_idx;
1431 struct lut_entry *lut = na->na_lut;
1432 void *ret = (i >= na->na_lut_objtotal) ? lut[0].vaddr : lut[i].vaddr;
1478 struct lut_entry *lut = na->na_lut.lut;
1479 void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr;
1433
1480
1434 *pp = (i >= na->na_lut_objtotal) ? lut[0].paddr : lut[i].paddr;
1481 *pp = (i >= na->na_lut.objtotal) ? lut[0].paddr : lut[i].paddr;
1435 return ret;
1436}
1437
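A hedged sketch of how a driver's txsync path typically resolves a slot with the helpers above; kring, na, nm_i and the slot contents are assumed to be set up by the caller:

        struct netmap_slot *slot = &kring->ring->slot[nm_i];
        u_int len = slot->len;
        uint64_t paddr;
        void *addr = PNMB(na, slot, &paddr);    /* virtual address, fills paddr */

        NM_CHECK_ADDR_LEN(na, addr, len);       /* clamp/validate addr and len */
        /* ... program the NIC descriptor with paddr and len ... */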
1438/* Generic version of NMB, which uses device-specific memory. */
1439
1440
1441
1442void netmap_txsync_to_host(struct netmap_adapter *na);

--- 11 unchanged lines hidden (view full) ---

1454 * since initialization involves the allocation of memory,
1455 * we reuse the memory allocator lock.
1456 *
1457 * Read access to the structure is lock free. Readers must check that
1458 * np_nifp is not NULL before using the other fields.
1459 * If np_nifp is NULL initialization has not been performed,
1460 * so they should return an error to userspace.
1461 *
1482 return ret;
1483}
1484
1485/* Generic version of NMB, which uses device-specific memory. */
1486
1487
1488
1489void netmap_txsync_to_host(struct netmap_adapter *na);

--- 11 unchanged lines hidden (view full) ---

1501 * since initialization involves the allocation of memory,
1502 * we reuse the memory allocator lock.
1503 *
1504 * Read access to the structure is lock free. Readers must check that
1505 * np_nifp is not NULL before using the other fields.
1506 * If np_nifp is NULL initialization has not been performed,
1507 * so they should return an error to userspace.
1508 *
1462 * The ref_done field is used to regulate access to the refcount in the
1509 * The ref_done field (XXX ?) is used to regulate access to the refcount in the
1463 * memory allocator. The refcount must be incremented at most once for
1464 * each open("/dev/netmap"). The increment is performed by the first
1465 * function that calls netmap_get_memory() (currently called by
1466 * mmap(), NIOCGINFO and NIOCREGIF).
1467 * If the refcount is incremented, it is then decremented when the
1468 * private structure is destroyed.
1469 */
1470struct netmap_priv_d {
1471 struct netmap_if * volatile np_nifp; /* netmap if descriptor. */
1472
1473 struct netmap_adapter *np_na;
1474 uint32_t np_flags; /* from the ioctl */
1510 * memory allocator. The refcount must be incremented at most once for
1511 * each open("/dev/netmap"). The increment is performed by the first
1512 * function that calls netmap_get_memory() (currently called by
1513 * mmap(), NIOCGINFO and NIOCREGIF).
1514 * If the refcount is incremented, it is then decremented when the
1515 * private structure is destroyed.
1516 */
1517struct netmap_priv_d {
1518 struct netmap_if * volatile np_nifp; /* netmap if descriptor. */
1519
1520 struct netmap_adapter *np_na;
1521 uint32_t np_flags; /* from the ioctl */
1475 u_int np_txqfirst, np_txqlast; /* range of tx rings to scan */
1476 u_int np_rxqfirst, np_rxqlast; /* range of rx rings to scan */
1522 u_int np_qfirst[NR_TXRX],
1523 np_qlast[NR_TXRX]; /* range of tx/rx rings to scan */
1477 uint16_t np_txpoll; /* XXX and also np_rxpoll ? */
1478
1524 uint16_t np_txpoll; /* XXX and also np_rxpoll ? */
1525
1479 struct netmap_mem_d *np_mref; /* use with NMG_LOCK held */
1480 /* np_refcount is only used on FreeBSD */
1481 int np_refcount; /* use with NMG_LOCK held */
1482
1483 /* pointers to the selinfo to be used for selrecord.
1484 * Either the local or the global one depending on the
1485 * number of rings.
1486 */
1526 /* np_refcount is only used on FreeBSD */
1527 int np_refcount; /* use with NMG_LOCK held */
1528
1529 /* pointers to the selinfo to be used for selrecord.
1530 * Either the local or the global one depending on the
1531 * number of rings.
1532 */
1487 NM_SELINFO_T *np_rxsi, *np_txsi;
1533 NM_SELINFO_T *np_si[NR_TXRX];
1488 struct thread *np_td; /* kqueue, just debugging */
1489};
1490
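A hedged sketch of the lock-free reader check described in the comment above this structure, as performed by the ioctl/poll paths; priv is assumed to be the caller's struct netmap_priv_d:

        struct netmap_if *nifp = priv->np_nifp; /* single volatile read */

        if (nifp == NULL)
                return ENXIO;   /* NIOCREGIF has not completed yet */
        /* otherwise the remaining fields of *priv are valid */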
1491#ifdef WITH_MONITOR
1492
1493struct netmap_monitor_adapter {
1494 struct netmap_adapter up;
1495

--- 6 unchanged lines hidden (view full) ---

1502
1503#ifdef WITH_GENERIC
1504/*
1505 * generic netmap emulation for devices that do not have
1506 * native netmap support.
1507 */
1508int generic_netmap_attach(struct ifnet *ifp);
1509
1534 struct thread *np_td; /* kqueue, just debugging */
1535};
1536
1537#ifdef WITH_MONITOR
1538
1539struct netmap_monitor_adapter {
1540 struct netmap_adapter up;
1541

--- 6 unchanged lines hidden (view full) ---

1548
1549#ifdef WITH_GENERIC
1550/*
1551 * generic netmap emulation for devices that do not have
1552 * native netmap support.
1553 */
1554int generic_netmap_attach(struct ifnet *ifp);
1555
1510int netmap_catch_rx(struct netmap_adapter *na, int intercept);
1556int netmap_catch_rx(struct netmap_generic_adapter *na, int intercept);
1511void generic_rx_handler(struct ifnet *ifp, struct mbuf *m);
1512void netmap_catch_tx(struct netmap_generic_adapter *na, int enable);
1513int generic_xmit_frame(struct ifnet *ifp, struct mbuf *m, void *addr, u_int len, u_int ring_nr);
1514int generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx);
1515void generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq);
1557void generic_rx_handler(struct ifnet *ifp, struct mbuf *m);
1558void netmap_catch_tx(struct netmap_generic_adapter *na, int enable);
1559int generic_xmit_frame(struct ifnet *ifp, struct mbuf *m, void *addr, u_int len, u_int ring_nr);
1560int generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx);
1561void generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq);
1562static inline struct ifnet*
1563netmap_generic_getifp(struct netmap_generic_adapter *gna)
1564{
1565 if (gna->prev)
1566 return gna->prev->ifp;
1516
1567
1568 return gna->up.up.ifp;
1569}
1570
1517//#define RATE_GENERIC /* Enables communication statistics for generic. */
1518#ifdef RATE_GENERIC
1519void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi);
1520#else
1521#define generic_rate(txp, txs, txi, rxp, rxs, rxi)
1522#endif
1523
1524/*

--- 121 unchanged lines hidden ---
1571//#define RATE_GENERIC /* Enables communication statistics for generic. */
1572#ifdef RATE_GENERIC
1573void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi);
1574#else
1575#define generic_rate(txp, txs, txi, rxp, rxs, rxi)
1576#endif
1577
1578/*

--- 121 unchanged lines hidden ---