netmap_kern.h (257529) → netmap_kern.h (259412)
1/*
2 * Copyright (C) 2011-2013 Matteo Landi, Luigi Rizzo. All rights reserved.
1/*
2 * Copyright (C) 2011-2013 Matteo Landi, Luigi Rizzo. All rights reserved.
3 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the

--- 8 unchanged lines hidden ---

19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26/*
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the

--- 8 unchanged lines hidden ---

20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27/*
27 * $FreeBSD: head/sys/dev/netmap/netmap_kern.h 257529 2013-11-01 21:21:14Z luigi $
28 * $FreeBSD: head/sys/dev/netmap/netmap_kern.h 259412 2013-12-15 08:37:24Z luigi $
28 *
29 * The header contains the definitions of constants and function
30 * prototypes used only in kernelspace.
31 */
32
33#ifndef _NET_NETMAP_KERN_H_
34#define _NET_NETMAP_KERN_H_
35
29 *
30 * The header contains the definitions of constants and function
31 * prototypes used only in kernelspace.
32 */
33
34#ifndef _NET_NETMAP_KERN_H_
35#define _NET_NETMAP_KERN_H_
36
37#define WITH_VALE // comment out to disable VALE support
38
36#if defined(__FreeBSD__)
37
38#define likely(x) __builtin_expect((long)!!(x), 1L)
39#define unlikely(x) __builtin_expect((long)!!(x), 0L)
40
41#define NM_LOCK_T struct mtx
39#if defined(__FreeBSD__)
40
41#define likely(x) __builtin_expect((long)!!(x), 1L)
42#define unlikely(x) __builtin_expect((long)!!(x), 0L)
43
44#define NM_LOCK_T struct mtx
45#define NMG_LOCK_T struct mtx
46#define NMG_LOCK_INIT() mtx_init(&netmap_global_lock, \
47 "netmap global lock", NULL, MTX_DEF)
48#define NMG_LOCK_DESTROY() mtx_destroy(&netmap_global_lock)
49#define NMG_LOCK() mtx_lock(&netmap_global_lock)
50#define NMG_UNLOCK() mtx_unlock(&netmap_global_lock)
51#define NMG_LOCK_ASSERT() mtx_assert(&netmap_global_lock, MA_OWNED)
52
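/*
 * (Illustrative sketch, not part of the header.) The global-lock
 * macros above are meant to bracket configuration paths; a minimal
 * usage pattern, with my_config_path() being a hypothetical caller:
 */
static void
my_config_path(void)
{
	NMG_LOCK();
	NMG_LOCK_ASSERT();	/* we own the lock here; helpers may assert too */
	/* ... inspect or modify global netmap state ... */
	NMG_UNLOCK();
}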
42#define NM_SELINFO_T struct selinfo
43#define MBUF_LEN(m) ((m)->m_pkthdr.len)
53#define NM_SELINFO_T struct selinfo
54#define MBUF_LEN(m) ((m)->m_pkthdr.len)
55#define MBUF_IFP(m) ((m)->m_pkthdr.rcvif)
44#define NM_SEND_UP(ifp, m) ((ifp)->if_input)(ifp, m)
45
56#define NM_SEND_UP(ifp, m) ((ifp)->if_input)(ifp, m)
57
46#define NM_ATOMIC_T volatile int
58#define NM_ATOMIC_T volatile int // XXX ?
59/* atomic operations */
60#include <machine/atomic.h>
61#define NM_ATOMIC_TEST_AND_SET(p) (!atomic_cmpset_acq_int((p), 0, 1))
62#define NM_ATOMIC_CLEAR(p) atomic_store_rel_int((p), 0)
47
63
64#define prefetch(x) __builtin_prefetch(x)
65
66MALLOC_DECLARE(M_NETMAP);
67
68// XXX linux struct, not used in FreeBSD
69struct net_device_ops {
70};
71struct hrtimer {
72};
73
48#elif defined (linux)
49
50#define NM_LOCK_T safe_spinlock_t // see bsd_glue.h
51#define NM_SELINFO_T wait_queue_head_t
52#define MBUF_LEN(m) ((m)->len)
74#elif defined (linux)
75
76#define NM_LOCK_T safe_spinlock_t // see bsd_glue.h
77#define NM_SELINFO_T wait_queue_head_t
78#define MBUF_LEN(m) ((m)->len)
79#define MBUF_IFP(m) ((m)->dev)
53#define NM_SEND_UP(ifp, m) netif_rx(m)
54
55#define NM_ATOMIC_T volatile long unsigned int
56
80#define NM_SEND_UP(ifp, m) netif_rx(m)
81
82#define NM_ATOMIC_T volatile long unsigned int
83
84// XXX a mtx would suffice here too 20130404 gl
85#define NMG_LOCK_T struct semaphore
86#define NMG_LOCK_INIT() sema_init(&netmap_global_lock, 1)
87#define NMG_LOCK_DESTROY()
88#define NMG_LOCK() down(&netmap_global_lock)
89#define NMG_UNLOCK() up(&netmap_global_lock)
90#define NMG_LOCK_ASSERT() // XXX to be completed
91
57#ifndef DEV_NETMAP
58#define DEV_NETMAP
59#endif /* DEV_NETMAP */
60
61/*
62 * IFCAP_NETMAP goes into net_device's priv_flags (if_capenable).
63 * This was 16 bits up to linux 2.6.36, so we need a 16 bit value on older
64 * platforms and tolerate the clash with IFF_DYNAMIC and IFF_BRIDGE_PORT.

--- 45 unchanged lines hidden ---

110
111struct netmap_adapter;
112struct nm_bdg_fwd;
113struct nm_bridge;
114struct netmap_priv_d;
115
116const char *nm_dump_buf(char *p, int len, int lim, char *dst);
117
92#ifndef DEV_NETMAP
93#define DEV_NETMAP
94#endif /* DEV_NETMAP */
95
96/*
97 * IFCAP_NETMAP goes into net_device's priv_flags (if_capenable).
98 * This was 16 bits up to linux 2.6.36, so we need a 16 bit value on older
99 * platforms and tolerate the clash with IFF_DYNAMIC and IFF_BRIDGE_PORT.

--- 45 unchanged lines hidden ---

145
146struct netmap_adapter;
147struct nm_bdg_fwd;
148struct nm_bridge;
149struct netmap_priv_d;
150
151const char *nm_dump_buf(char *p, int len, int lim, char *dst);
152
153#include "netmap_mbq.h"
154
155extern NMG_LOCK_T netmap_global_lock;
156
118/*
119 * private, kernel view of a ring. Keeps track of the status of
120 * a ring across system calls.
121 *
122 * nr_hwcur index of the next buffer to refill.
123 * It corresponds to ring->cur - ring->reserved
124 *
125 * nr_hwavail the number of slots "owned" by userspace.

--- 21 unchanged lines hidden ---

147 * copy outside the lock.
148 * In RX rings (used for VALE ports),
149 * nkr_hwcur + nkr_hwavail <= nkr_hwlease < nkr_hwcur+N-1
150 * In TX rings (used for NIC or host stack ports)
151 * nkr_hwcur <= nkr_hwlease < nkr_hwcur+ nkr_hwavail
152 * nkr_leases array of nkr_num_slots where writers can report
153 * completion of their block. NR_NOSLOT (~0) indicates
154 * that the writer has not finished yet
157/*
158 * private, kernel view of a ring. Keeps track of the status of
159 * a ring across system calls.
160 *
161 * nr_hwcur index of the next buffer to refill.
162 * It corresponds to ring->cur - ring->reserved
163 *
164 * nr_hwavail the number of slots "owned" by userspace.

--- 21 unchanged lines hidden ---

186 * copy outside the lock.
187 * In RX rings (used for VALE ports),
188 * nkr_hwcur + nkr_hwavail <= nkr_hwlease < nkr_hwcur+N-1
189 * In TX rings (used for NIC or host stack ports)
190 * nkr_hwcur <= nkr_hwlease < nkr_hwcur+ nkr_hwavail
191 * nkr_leases array of nkr_num_slots where writers can report
192 * completion of their block. NR_NOSLOT (~0) indicates
193 * that the writer has not finished yet
155 * nkr_lease_idx index of next free slot in nr_leases, to be assigned
194 * nkr_lease_idx index of next free slot in nr_leases, to be assigned
156 *
157 * The kring is manipulated by txsync/rxsync and generic netmap functions.
158 * q_lock is used to arbitrate access to the kring from within the netmap
159 * code, and this and other protections guarantee that there is never
160 * more than 1 concurrent call to txsync or rxsync. So we are free
161 * to manipulate the kring from within txsync/rxsync without any extra
162 * locks.
163 */
164struct netmap_kring {
165 struct netmap_ring *ring;
166 uint32_t nr_hwcur;
167 uint32_t nr_hwavail;
168 uint32_t nr_kflags; /* private driver flags */
195 *
196 * The kring is manipulated by txsync/rxsync and generic netmap functions.
197 * q_lock is used to arbitrate access to the kring from within the netmap
198 * code, and this and other protections guarantee that there is never
199 * more than 1 concurrent call to txsync or rxsync. So we are free
200 * to manipulate the kring from within txsync/rxsync without any extra
201 * locks.
202 */
203struct netmap_kring {
204 struct netmap_ring *ring;
205 uint32_t nr_hwcur;
206 uint32_t nr_hwavail;
207 uint32_t nr_kflags; /* private driver flags */
208 int32_t nr_hwreserved;
169#define NKR_PENDINTR 0x1 // Pending interrupt.
170 uint32_t nkr_num_slots;
171 int32_t nkr_hwofs; /* offset between NIC and netmap ring */
172
173 uint16_t nkr_slot_flags; /* initial value for flags */
174 struct netmap_adapter *na;
175 struct nm_bdg_fwd *nkr_ft;
176 uint32_t *nkr_leases;
177#define NR_NOSLOT ((uint32_t)~0)
178 uint32_t nkr_hwlease;
179 uint32_t nkr_lease_idx;
180
181 NM_SELINFO_T si; /* poll/select wait queue */
182 NM_LOCK_T q_lock; /* protects kring and ring. */
183 NM_ATOMIC_T nr_busy; /* prevent concurrent syscalls */
184
185 volatile int nkr_stopped;
209#define NKR_PENDINTR 0x1 // Pending interrupt.
210 uint32_t nkr_num_slots;
211 int32_t nkr_hwofs; /* offset between NIC and netmap ring */
212
213 uint16_t nkr_slot_flags; /* initial value for flags */
214 struct netmap_adapter *na;
215 struct nm_bdg_fwd *nkr_ft;
216 uint32_t *nkr_leases;
217#define NR_NOSLOT ((uint32_t)~0)
218 uint32_t nkr_hwlease;
219 uint32_t nkr_lease_idx;
220
221 NM_SELINFO_T si; /* poll/select wait queue */
222 NM_LOCK_T q_lock; /* protects kring and ring. */
223 NM_ATOMIC_T nr_busy; /* prevent concurrent syscalls */
224
225 volatile int nkr_stopped;
226
227 /* support for adapters without native netmap support.
228 * On tx rings we preallocate an array of tx buffers
229 * (same size as the netmap ring), on rx rings we
230 * store incoming packets in a queue.
231 * XXX who writes to the rx queue ?
232 */
233 struct mbuf **tx_pool;
234 u_int nr_ntc; /* Emulation of a next-to-clean RX ring pointer. */
235 struct mbq rx_queue; /* A queue for intercepted rx mbufs. */
236
186} __attribute__((__aligned__(64)));
187
188
189/* return the next index, with wraparound */
190static inline uint32_t
191nm_next(uint32_t i, uint32_t lim)
192{
193 return unlikely (i == lim) ? 0 : i + 1;

--- 46 unchanged lines hidden ---

240 * nm_kr_lease() reserves the required number of buffers,
241 * advances nkr_hwlease and also returns an entry in
242 * a circular array where completions should be reported.
243 */
244
245
246
247
237} __attribute__((__aligned__(64)));
238
239
240/* return the next index, with wraparound */
241static inline uint32_t
242nm_next(uint32_t i, uint32_t lim)
243{
244 return unlikely (i == lim) ? 0 : i + 1;

--- 46 unchanged lines hidden ---

291 * nm_kr_lease() reserves the required number of buffers,
292 * advances nkr_hwlease and also returns an entry in
293 * a circular array where completions should be reported.
294 */
295
296
297
298
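/*
 * (Illustrative sketch.) The lease protocol described above, using
 * the nm_kr_space()/nm_kr_lease() helpers defined later in this file.
 * my_bdg_writer() is hypothetical and the exact completion value is
 * an assumption; the real VALE code performs the lease step under the
 * kring lock:
 */
static void
my_bdg_writer(struct netmap_kring *k, u_int n, int is_rx)
{
	u_int end, lease_idx;

	if (nm_kr_space(k, is_rx) < n)
		return;				/* not enough room */
	lease_idx = nm_kr_lease(k, n, is_rx);	/* advances nkr_hwlease */
	end = k->nkr_hwlease;			/* one past our block */
	/* ... copy our n slots, outside the lock ... */
	k->nkr_leases[lease_idx] = end;		/* report completion (was NR_NOSLOT) */
}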
299enum txrx { NR_RX = 0, NR_TX = 1 };
248
249/*
300
301/*
250 * This struct extends the 'struct adapter' (or
251 * equivalent) device descriptor. It contains all fields needed to
252 * support netmap operation.
302 * The "struct netmap_adapter" extends the "struct adapter"
303 * (or equivalent) device descriptor.
304 * It contains all base fields needed to support netmap operation.
305 * There are in fact different types of netmap adapters
306 * (native, generic, VALE switch...) so a netmap_adapter is
307 * just the first field in the derived type.
253 */
254struct netmap_adapter {
255 /*
256 * On linux we do not have a good way to tell if an interface
308 */
309struct netmap_adapter {
310 /*
311 * On linux we do not have a good way to tell if an interface
257 * is netmap-capable. So we use the following trick:
312 * is netmap-capable. So we always use the following trick:
258 * NA(ifp) points here, and the first entry (which hopefully
259 * always exists and is at least 32 bits) contains a magic
260 * value which we can use to detect that the interface is good.
261 */
262 uint32_t magic;
313 * NA(ifp) points here, and the first entry (which hopefully
314 * always exists and is at least 32 bits) contains a magic
315 * value which we can use to detect that the interface is good.
316 */
317 uint32_t magic;
263 uint32_t na_flags; /* future place for IFCAP_NETMAP */
318 uint32_t na_flags; /* enabled, and other flags */
264#define NAF_SKIP_INTR 1 /* use the regular interrupt handler.
265 * useful during initialization
266 */
267#define NAF_SW_ONLY 2 /* forward packets only to sw adapter */
268#define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when
269 * forwarding packets coming from this
270 * interface
271 */
272#define NAF_MEM_OWNER 8 /* the adapter is responsible for the
273 * deallocation of the memory allocator
274 */
319#define NAF_SKIP_INTR 1 /* use the regular interrupt handler.
320 * useful during initialization
321 */
322#define NAF_SW_ONLY 2 /* forward packets only to sw adapter */
323#define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when
324 * forwarding packets coming from this
325 * interface
326 */
327#define NAF_MEM_OWNER 8 /* the adapter is responsible for the
328 * deallocation of the memory allocator
329 */
275 int refcount; /* number of user-space descriptors using this
330#define NAF_NATIVE_ON 16 /* the adapter is native and the attached
331 * interface is in netmap mode
332 */
333#define NAF_NETMAP_ON 32 /* netmap is active (either native or
334 * emulated. Where possible (e.g. FreeBSD)
335 * IFCAP_NETMAP also mirrors this flag.
336 */
337 int active_fds; /* number of user-space descriptors using this
276 interface, which is equal to the number of
277 struct netmap_if objs in the mapped region. */
338 interface, which is equal to the number of
339 struct netmap_if objs in the mapped region. */
278 /*
279 * The selwakeup in the interrupt thread can use per-ring
280 * and/or global wait queues. We track how many clients
281 * of each type we have so we can optimize the drivers,
282 * and especially avoid huge contention on the locks.
283 */
284 int na_single; /* threads attached to a single hw queue */
285 int na_multi; /* threads attached to multiple hw queues */
286
287 u_int num_rx_rings; /* number of adapter receive rings */
288 u_int num_tx_rings; /* number of adapter transmit rings */
289
290 u_int num_tx_desc; /* number of descriptors in each queue */
291 u_int num_rx_desc;
292
293 /* tx_rings and rx_rings are private but allocated
294 * as a contiguous chunk of memory. Each array has
295 * N+1 entries, for the adapter queues and for the host queue.
296 */
297 struct netmap_kring *tx_rings; /* array of TX rings. */
298 struct netmap_kring *rx_rings; /* array of RX rings. */
340
341 u_int num_rx_rings; /* number of adapter receive rings */
342 u_int num_tx_rings; /* number of adapter transmit rings */
343
344 u_int num_tx_desc; /* number of descriptors in each queue */
345 u_int num_rx_desc;
346
347 /* tx_rings and rx_rings are private but allocated
348 * as a contiguous chunk of memory. Each array has
349 * N+1 entries, for the adapter queues and for the host queue.
350 */
351 struct netmap_kring *tx_rings; /* array of TX rings. */
352 struct netmap_kring *rx_rings; /* array of RX rings. */
353 void *tailroom; /* space below the rings array */
354 /* (used for leases) */
299
355
356
300 NM_SELINFO_T tx_si, rx_si; /* global wait queues */
301
302 /* copy of if_qflush and if_transmit pointers, to intercept
303 * packets from the network stack when netmap is active.
304 */
305 int (*if_transmit)(struct ifnet *, struct mbuf *);
306
307 /* references to the ifnet and device routines, used by
308 * the generic netmap functions.
309 */
310 struct ifnet *ifp; /* adapter is ifp->if_softc */
311
357 NM_SELINFO_T tx_si, rx_si; /* global wait queues */
358
359 /* copy of if_qflush and if_transmit pointers, to intercept
360 * packets from the network stack when netmap is active.
361 */
362 int (*if_transmit)(struct ifnet *, struct mbuf *);
363
364 /* references to the ifnet and device routines, used by
365 * the generic netmap functions.
366 */
367 struct ifnet *ifp; /* adapter is ifp->if_softc */
368
312 NM_LOCK_T core_lock; /* used if no device lock available */
369 /* private cleanup */
370 void (*nm_dtor)(struct netmap_adapter *);
313
371
314 int (*nm_register)(struct ifnet *, int onoff);
372 int (*nm_register)(struct netmap_adapter *, int onoff);
315
373
316 int (*nm_txsync)(struct ifnet *, u_int ring, int flags);
317 int (*nm_rxsync)(struct ifnet *, u_int ring, int flags);
374 int (*nm_txsync)(struct netmap_adapter *, u_int ring, int flags);
375 int (*nm_rxsync)(struct netmap_adapter *, u_int ring, int flags);
318#define NAF_FORCE_READ 1
319#define NAF_FORCE_RECLAIM 2
320 /* return configuration information */
376#define NAF_FORCE_READ 1
377#define NAF_FORCE_RECLAIM 2
378 /* return configuration information */
321 int (*nm_config)(struct ifnet *, u_int *txr, u_int *txd,
322 u_int *rxr, u_int *rxd);
379 int (*nm_config)(struct netmap_adapter *,
380 u_int *txr, u_int *txd, u_int *rxr, u_int *rxd);
381 int (*nm_krings_create)(struct netmap_adapter *);
382 void (*nm_krings_delete)(struct netmap_adapter *);
383 int (*nm_notify)(struct netmap_adapter *,
384 u_int ring, enum txrx, int flags);
385#define NAF_GLOBAL_NOTIFY 4
386#define NAF_DISABLE_NOTIFY 8
323
387
388 /* standard refcount to control the lifetime of the adapter
389 * (it should be equal to the lifetime of the corresponding ifp)
390 */
391 int na_refcount;
392
393 /* memory allocator (opaque)
394 * We also cache a pointer to the lut_entry for translating
395 * buffer addresses, and the total number of buffers.
396 */
397 struct netmap_mem_d *nm_mem;
398 struct lut_entry *na_lut;
399 uint32_t na_lut_objtotal; /* max buffer index */
400
401 /* used internally. If non-null, the interface cannot be bound
402 * from userspace
403 */
404 void *na_private;
405};
406
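/*
 * (Illustrative sketch.) The "derived type" layout described above:
 * the base netmap_adapter must be the first field of a derived
 * adapter, so a base pointer can be cast back to the derived type.
 * my_hw_adapter/my_hwna() are hypothetical; the real derived types
 * follow below.
 */
struct my_hw_adapter {
	struct netmap_adapter up;	/* must be first */
	int my_private_state;
};

static inline struct my_hw_adapter *
my_hwna(struct netmap_adapter *na)
{
	return (struct my_hw_adapter *)na;	/* valid because 'up' is first */
}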
407/*
408 * If the NIC is owned by the kernel
409 * (i.e., bridge), neither another bridge nor user can use it;
410 * if the NIC is owned by a user, only users can share it.
411 * Evaluation must be done under NMG_LOCK().
412 */
413#define NETMAP_OWNED_BY_KERN(na) (na->na_private)
414#define NETMAP_OWNED_BY_ANY(na) \
415 (NETMAP_OWNED_BY_KERN(na) || (na->active_fds > 0))
416
417
418/*
419 * derived netmap adapters for various types of ports
420 */
421struct netmap_vp_adapter { /* VALE software port */
422 struct netmap_adapter up;
423
324 /*
325 * Bridge support:
326 *
327 * bdg_port is the port number used in the bridge;
424 /*
425 * Bridge support:
426 *
427 * bdg_port is the port number used in the bridge;
328 * na_bdg_refcount is a refcount used for bridge ports,
329 * when it goes to 0 we can detach+free this port
330 * (a bridge port is always attached if it exists;
331 * it is not always registered)
332 * na_bdg points to the bridge this NA is attached to.
333 */
334 int bdg_port;
428 * na_bdg points to the bridge this NA is attached to.
429 */
430 int bdg_port;
335 int na_bdg_refcount;
336 struct nm_bridge *na_bdg;
431 struct nm_bridge *na_bdg;
432 int retry;
433
434 u_int offset; /* Offset of ethernet header for each packet. */
435};
436
437struct netmap_hw_adapter { /* physical device */
438 struct netmap_adapter up;
439
440 struct net_device_ops nm_ndo; // XXX linux only
441};
442
443struct netmap_generic_adapter { /* non-native device */
444 struct netmap_hw_adapter up;
445
446 /* Pointer to a previously used netmap adapter. */
447 struct netmap_adapter *prev;
448
449 /* generic netmap adapters support:
450 * a net_device_ops struct overrides ndo_select_queue(),
451 * save_if_input saves the if_input hook (FreeBSD),
452 * mit_timer and mit_pending implement rx interrupt mitigation,
453 */
454 struct net_device_ops generic_ndo;
455 void (*save_if_input)(struct ifnet *, struct mbuf *);
456
457 struct hrtimer mit_timer;
458 int mit_pending;
459};
460
461#ifdef WITH_VALE
462
463/* bridge wrapper for non VALE ports. It is used to connect real devices to the bridge.
464 *
465 * The real device must already have its own netmap adapter (hwna). The
466 * bridge wrapper and the hwna adapter share the same set of netmap rings and
467 * buffers, but they have two separate sets of krings descriptors, with tx/rx
468 * meanings swapped:
469 *
470 *                                  netmap
471 *            bwrap     krings       rings      krings      hwna
472 *          +------+   +------+     +-----+    +------+   +------+
473 *          |tx_rings->|      |\   /|     |----|      |<-tx_rings|
474 *          |      |   +------+ \ / +-----+    +------+   |      |
475 *          |      |             X                        |      |
476 *          |      |            / \                       |      |
477 *          |      |   +------+/   \+-----+    +------+   |      |
478 *          |rx_rings->|      |     |     |----|      |<-rx_rings|
479 *          |      |   +------+     +-----+    +------+   |      |
480 *          +------+                                      +------+
481 *
482 * - packets coming from the bridge go to the bwrap rx rings, which are also the
483 * hwna tx rings. The bwrap notify callback will then complete the hwna tx
484 * (see netmap_bwrap_notify).
485 * - packets coming from the outside go to the hwna rx rings, which are also the
486 * bwrap tx rings. The (overwritten) hwna notify method will then complete
487 * the bridge tx (see netmap_bwrap_intr_notify).
488 *
489 * The bridge wrapper may optionally connect the hwna 'host' rings to the
490 * bridge. This is done by using a second port in the bridge and connecting it
491 * to the 'host' netmap_vp_adapter contained in the netmap_bwrap_adapter.
492 * The bwrap host adapter cross-links the hwna host rings in the same way as shown above.
493 *
494 * - packets coming from the bridge and directed to host stack are handled by the
495 * bwrap host notify callback (see netmap_bwrap_host_notify)
496 * - packets coming from the host stack are still handled by the overwritten
497 * hwna notify callback (netmap_bwrap_intr_notify), but are diverted to the
498 * host adapter depending on the ring number.
499 *
500 */
501struct netmap_bwrap_adapter {
502 struct netmap_vp_adapter up;
503 struct netmap_vp_adapter host; /* for host rings */
504 struct netmap_adapter *hwna; /* the underlying device */
505
506 /* backup of the hwna notify callback */
507 int (*save_notify)(struct netmap_adapter *,
508 u_int ring, enum txrx, int flags);
337 /* When we attach a physical interface to the bridge, we
338 * allow the controlling process to terminate, so we need
339 * a place to store the netmap_priv_d data structure.
340 * This is only done when physical interfaces are attached to a bridge.
341 */
342 struct netmap_priv_d *na_kpriv;
509 /* When we attach a physical interface to the bridge, we
510 * allow the controlling process to terminate, so we need
511 * a place to store the netmap_priv_d data structure.
512 * This is only done when physical interfaces are attached to a bridge.
513 */
514 struct netmap_priv_d *na_kpriv;
343
344 /* memory allocator */
345 struct netmap_mem_d *nm_mem;
346#ifdef linux
347 struct net_device_ops nm_ndo;
348#endif /* linux */
349};
350
515};
516
517
351/*
518/*
352 * Available space in the ring.
519 * Available space in the ring. Only used in VALE code
353 */
354static inline uint32_t
355nm_kr_space(struct netmap_kring *k, int is_rx)
356{
357 int space;
358
359 if (is_rx) {
520 */
521static inline uint32_t
522nm_kr_space(struct netmap_kring *k, int is_rx)
523{
524 int space;
525
526 if (is_rx) {
360 int busy = k->nkr_hwlease - k->nr_hwcur;
527 int busy = k->nkr_hwlease - k->nr_hwcur + k->nr_hwreserved;
361 if (busy < 0)
362 busy += k->nkr_num_slots;
363 space = k->nkr_num_slots - 1 - busy;
364 } else {
365 space = k->nr_hwcur + k->nr_hwavail - k->nkr_hwlease;
366 if (space < 0)
367 space += k->nkr_num_slots;
368 }

--- 7 unchanged lines hidden ---

376 D("invalid kring, cur %d avail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
377 k->nkr_lease_idx, k->nkr_num_slots);
378 }
379#endif
380 return space;
381}
382
383
528 if (busy < 0)
529 busy += k->nkr_num_slots;
530 space = k->nkr_num_slots - 1 - busy;
531 } else {
532 space = k->nr_hwcur + k->nr_hwavail - k->nkr_hwlease;
533 if (space < 0)
534 space += k->nkr_num_slots;
535 }

--- 7 unchanged lines hidden ---

543 D("invalid kring, cur %d avail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
544 k->nkr_lease_idx, k->nkr_num_slots);
545 }
546#endif
547 return space;
548}
549
550
384/* return update position */
385static inline uint32_t
386nm_kr_rxpos(struct netmap_kring *k)
387{
388 uint32_t pos = k->nr_hwcur + k->nr_hwavail;
389 if (pos >= k->nkr_num_slots)
390 pos -= k->nkr_num_slots;
391#if 0
392 if (pos >= k->nkr_num_slots ||
393 k->nkr_hwlease >= k->nkr_num_slots ||
394 k->nr_hwcur >= k->nkr_num_slots ||
395 k->nr_hwavail >= k->nkr_num_slots ||
396 k->nkr_lease_idx >= k->nkr_num_slots) {
397 D("invalid kring, cur %d avail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
398 k->nkr_lease_idx, k->nkr_num_slots);
399 }
400#endif
401 return pos;
402}
403
404
405/* make a lease on the kring for N positions. return the
406 * lease index
407 */
408static inline uint32_t
409nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx)
410{

--- 19 unchanged lines hidden ---

430 D("invalid kring %s, cur %d avail %d lease %d lease_idx %d lim %d",
431 k->na->ifp->if_xname,
432 k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
433 k->nkr_lease_idx, k->nkr_num_slots);
434 }
435 return lease_idx;
436}
437
551
552
553/* make a lease on the kring for N positions. return the
554 * lease index
555 */
556static inline uint32_t
557nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx)
558{

--- 19 unchanged lines hidden ---

578 D("invalid kring %s, cur %d avail %d lease %d lease_idx %d lim %d",
579 k->na->ifp->if_xname,
580 k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
581 k->nkr_lease_idx, k->nkr_num_slots);
582 }
583 return lease_idx;
584}
585
586#endif /* WITH_VALE */
438
587
588/* return update position */
589static inline uint32_t
590nm_kr_rxpos(struct netmap_kring *k)
591{
592 uint32_t pos = k->nr_hwcur + k->nr_hwavail;
593 if (pos >= k->nkr_num_slots)
594 pos -= k->nkr_num_slots;
595#if 0
596 if (pos >= k->nkr_num_slots ||
597 k->nkr_hwlease >= k->nkr_num_slots ||
598 k->nr_hwcur >= k->nkr_num_slots ||
599 k->nr_hwavail >= k->nkr_num_slots ||
600 k->nkr_lease_idx >= k->nkr_num_slots) {
601 D("invalid kring, cur %d avail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
602 k->nkr_lease_idx, k->nkr_num_slots);
603 }
604#endif
605 return pos;
606}
607
608
439/*
609/*
440 * XXX NETMAP_DELETING() is unused
441 *
442 * The combination of "enable" (ifp->if_capenable & IFCAP_NETMAP)
443 * and refcount gives the status of the interface, namely:
444 *
445 * enable refcount Status
446 *
447 * FALSE 0 normal operation
448 * FALSE != 0 -- (impossible)
449 * TRUE 1 netmap mode
450 * TRUE 0 being deleted.
610 * protect against multiple threads using the same ring.
611 * also check that the ring has not been stopped.
612 * We only care for 0 or !=0 as a return code.
451 */
613 */
614#define NM_KR_BUSY 1
615#define NM_KR_STOPPED 2
452
616
453#define NETMAP_DELETING(_na) ( ((_na)->refcount == 0) && \
454 ( (_na)->ifp->if_capenable & IFCAP_NETMAP) )
617static __inline void nm_kr_put(struct netmap_kring *kr)
618{
619 NM_ATOMIC_CLEAR(&kr->nr_busy);
620}
455
621
622static __inline int nm_kr_tryget(struct netmap_kring *kr)
623{
624 /* check a first time without taking the lock
625 * to avoid starvation for nm_kr_get()
626 */
627 if (unlikely(kr->nkr_stopped)) {
628 ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
629 return NM_KR_STOPPED;
630 }
631 if (unlikely(NM_ATOMIC_TEST_AND_SET(&kr->nr_busy)))
632 return NM_KR_BUSY;
633 /* check a second time with lock held */
634 if (unlikely(kr->nkr_stopped)) {
635 ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
636 nm_kr_put(kr);
637 return NM_KR_STOPPED;
638 }
639 return 0;
640}
456
641
642
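/*
 * (Illustrative sketch.) Callers bracket per-ring work with
 * nm_kr_tryget()/nm_kr_put() as defined above; my_ring_work() is
 * hypothetical:
 */
static inline int
my_ring_work(struct netmap_kring *kr)
{
	int error = nm_kr_tryget(kr);

	if (error)			/* NM_KR_BUSY or NM_KR_STOPPED */
		return error;
	/* ... exclusive access to the kring and ring here ... */
	nm_kr_put(kr);
	return 0;
}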
457/*
458 * The following are support routines used by individual drivers to
459 * support netmap operation.
460 *
461 * netmap_attach() initializes a struct netmap_adapter, allocating the
462 * struct netmap_ring's and the struct selinfo.
463 *
464 * netmap_detach() frees the memory allocated by netmap_attach().
465 *
466 * netmap_transmit() replaces the if_transmit routine of the interface,
467 * and is used to intercept packets coming from the stack.
468 *
469 * netmap_load_map/netmap_reload_map are helper routines to set/reset
470 * the dmamap for a packet buffer
471 *
472 * netmap_reset() is a helper routine to be called in the driver
473 * when reinitializing a ring.
474 */
643/*
644 * The following are support routines used by individual drivers to
645 * support netmap operation.
646 *
647 * netmap_attach() initializes a struct netmap_adapter, allocating the
648 * struct netmap_ring's and the struct selinfo.
649 *
650 * netmap_detach() frees the memory allocated by netmap_attach().
651 *
652 * netmap_transmit() replaces the if_transmit routine of the interface,
653 * and is used to intercept packets coming from the stack.
654 *
655 * netmap_load_map/netmap_reload_map are helper routines to set/reset
656 * the dmamap for a packet buffer
657 *
658 * netmap_reset() is a helper routine to be called in the driver
659 * when reinitializing a ring.
660 */
475int netmap_attach(struct netmap_adapter *, u_int);
661int netmap_attach(struct netmap_adapter *);
662int netmap_attach_common(struct netmap_adapter *);
663void netmap_detach_common(struct netmap_adapter *na);
476void netmap_detach(struct ifnet *);
477int netmap_transmit(struct ifnet *, struct mbuf *);
664void netmap_detach(struct ifnet *);
665int netmap_transmit(struct ifnet *, struct mbuf *);
478enum txrx { NR_RX = 0, NR_TX = 1 };
479struct netmap_slot *netmap_reset(struct netmap_adapter *na,
480 enum txrx tx, u_int n, u_int new_cur);
481int netmap_ring_reinit(struct netmap_kring *);
482
666struct netmap_slot *netmap_reset(struct netmap_adapter *na,
667 enum txrx tx, u_int n, u_int new_cur);
668int netmap_ring_reinit(struct netmap_kring *);
669
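/*
 * (Illustrative sketch.) A native driver attach with the interfaces
 * above; note that in this revision netmap_attach() lost its second
 * argument and the nm_* callbacks take a struct netmap_adapter *.
 * my_attach() and the my_* callbacks are hypothetical, and the ring
 * sizes are made up:
 */
static int my_register(struct netmap_adapter *, int);
static int my_txsync(struct netmap_adapter *, u_int, int);
static int my_rxsync(struct netmap_adapter *, u_int, int);

static void
my_attach(struct ifnet *ifp)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = ifp;
	na.num_tx_desc = na.num_rx_desc = 1024;	/* hypothetical */
	na.num_tx_rings = na.num_rx_rings = 1;
	na.nm_register = my_register;
	na.nm_txsync = my_txsync;
	na.nm_rxsync = my_rxsync;
	netmap_attach(&na);	/* was netmap_attach(&na, num_queues) */
}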
670/* set/clear native flags. XXX maybe also if_transmit ? */
671static inline void
672nm_set_native_flags(struct netmap_adapter *na)
673{
674 struct ifnet *ifp = na->ifp;
675
676 na->na_flags |= (NAF_NATIVE_ON | NAF_NETMAP_ON);
677#ifdef IFCAP_NETMAP /* or FreeBSD ? */
678 ifp->if_capenable |= IFCAP_NETMAP;
679#endif
680#ifdef __FreeBSD__
681 na->if_transmit = ifp->if_transmit;
682 ifp->if_transmit = netmap_transmit;
683#else
684 na->if_transmit = (void *)ifp->netdev_ops;
685 ifp->netdev_ops = &((struct netmap_hw_adapter *)na)->nm_ndo;
686#endif
687}
688
689static inline void
690nm_clear_native_flags(struct netmap_adapter *na)
691{
692 struct ifnet *ifp = na->ifp;
693
694#ifdef __FreeBSD__
695 ifp->if_transmit = na->if_transmit;
696#else
697 ifp->netdev_ops = (void *)na->if_transmit;
698#endif
699 na->na_flags &= ~(NAF_NATIVE_ON | NAF_NETMAP_ON);
700#ifdef IFCAP_NETMAP /* or FreeBSD ? */
701 ifp->if_capenable &= ~IFCAP_NETMAP;
702#endif
703}
704
705/*
706 * validates parameters in the ring/kring, returns a value for cur,
707 * and the 'new_slots' value in the argument.
708 * If any error, returns cur > lim to force a reinit.
709 */
710u_int nm_txsync_prologue(struct netmap_kring *, u_int *);
711
712/*
713 * validates parameters in the ring/kring, returns a value for cur,
714 * and the 'reserved' value in the argument.
715 * If any error, returns cur > lim to force a reinit.
716 */
717u_int nm_rxsync_prologue(struct netmap_kring *, u_int *);
718
719/*
720 * update kring and ring at the end of txsync
721 */
722static inline void
723nm_txsync_finalize(struct netmap_kring *kring, u_int cur)
724{
725 /* recompute hwreserved */
726 kring->nr_hwreserved = cur - kring->nr_hwcur;
727 if (kring->nr_hwreserved < 0)
728 kring->nr_hwreserved += kring->nkr_num_slots;
729
730 /* update avail and reserved to what the kernel knows */
731 kring->ring->avail = kring->nr_hwavail;
732 kring->ring->reserved = kring->nr_hwreserved;
733}
734
735/* check/fix address and len in tx rings */
736#if 1 /* debug version */
737#define NM_CHECK_ADDR_LEN(_a, _l) do { \
738 if (_a == netmap_buffer_base || _l > NETMAP_BUF_SIZE) { \
739 RD(5, "bad addr/len ring %d slot %d idx %d len %d", \
740 ring_nr, nm_i, slot->buf_idx, len); \
741 if (_l > NETMAP_BUF_SIZE) \
742 _l = NETMAP_BUF_SIZE; \
743 } } while (0)
744#else /* no debug version */
745#define NM_CHECK_ADDR_LEN(_a, _l) do { \
746 if (_l > NETMAP_BUF_SIZE) \
747 _l = NETMAP_BUF_SIZE; \
748 } while (0)
749#endif
750
751
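/*
 * (Illustrative sketch.) A much-simplified txsync body combining the
 * helpers above (nm_txsync_prologue()/nm_txsync_finalize() and
 * NM_CHECK_ADDR_LEN()). A real driver programs the NIC ring and
 * reclaims completed slots; my_txsync() and the "everything
 * completed" step are hypothetical:
 */
static int
my_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	u_int nm_i, new_slots;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const cur = nm_txsync_prologue(kring, &new_slots);

	(void)flags;
	if (cur > lim)			/* error: force a ring reinit */
		return netmap_ring_reinit(kring);

	for (nm_i = kring->nr_hwcur; nm_i != cur; nm_i = nm_next(nm_i, lim)) {
		struct netmap_slot *slot = &kring->ring->slot[nm_i];
		u_int len = slot->len;
		void *addr = NMB(slot);	/* slot-to-buffer helper, later in this header */

		NM_CHECK_ADDR_LEN(addr, len);
		/* ... hand (addr, len) to the NIC here ... */
	}
	kring->nr_hwcur = cur;
	kring->nr_hwavail = lim;	/* hypothetical: NIC completed all */
	nm_txsync_finalize(kring, cur);
	return 0;
}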
752/*---------------------------------------------------------------*/
753/*
754 * Support routines to be used with the VALE switch
755 */
756int netmap_update_config(struct netmap_adapter *na);
757int netmap_krings_create(struct netmap_adapter *na, u_int ntx, u_int nrx, u_int tailroom);
758void netmap_krings_delete(struct netmap_adapter *na);
759
760struct netmap_if *
761netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
762 uint16_t ringid, int *err);
763
764
765
483u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
766u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
767int netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
768int netmap_get_hw_na(struct ifnet *ifp, struct netmap_adapter **na);
484
769
770#ifdef WITH_VALE
485/*
486 * The following bridge-related interfaces are used by other kernel modules
487 * In the version that only supports unicast or broadcast, the lookup
488 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
489 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown.
490 * XXX in practice "unknown" might be handled same as broadcast.
491 */
771/*
772 * The following bridge-related interfaces are used by other kernel modules
773 * In the version that only supports unicast or broadcast, the lookup
774 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
775 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown.
776 * XXX in practice "unknown" might be handled same as broadcast.
777 */
492typedef u_int (*bdg_lookup_fn_t)(char *buf, u_int len, uint8_t *ring_nr,
493 struct netmap_adapter *);
494int netmap_bdg_ctl(struct nmreq *nmr, bdg_lookup_fn_t func);
495u_int netmap_bdg_learning(char *, u_int, uint8_t *, struct netmap_adapter *);
496#define NM_NAME "vale" /* prefix for the bridge port name */
497#define NM_BDG_MAXPORTS 254 /* up to 32 for bitmap, 254 ok otherwise */
778typedef u_int (*bdg_lookup_fn_t)(char *buf, u_int len,
779 uint8_t *ring_nr, struct netmap_vp_adapter *);
780u_int netmap_bdg_learning(char *, u_int, uint8_t *,
781 struct netmap_vp_adapter *);
782
783#define NM_BDG_MAXPORTS 254 /* up to 254 */
498#define NM_BDG_BROADCAST NM_BDG_MAXPORTS
499#define NM_BDG_NOPORT (NM_BDG_MAXPORTS+1)
500
784#define NM_BDG_BROADCAST NM_BDG_MAXPORTS
785#define NM_BDG_NOPORT (NM_BDG_MAXPORTS+1)
786
787#define NM_NAME "vale" /* prefix for bridge port name */
788
789
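/*
 * (Illustrative sketch.) A lookup function matching bdg_lookup_fn_t
 * above; this hypothetical one floods every frame. A module would
 * install it with netmap_bdg_ctl(nmr, my_bdg_flood):
 */
static u_int
my_bdg_flood(char *buf, u_int len, uint8_t *ring_nr,
	struct netmap_vp_adapter *vpna)
{
	(void)buf; (void)len; (void)ring_nr; (void)vpna;
	return NM_BDG_BROADCAST;	/* deliver to all ports */
}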
790/* these are redefined in case of no VALE support */
791int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
792void netmap_init_bridges(void);
793int netmap_bdg_ctl(struct nmreq *nmr, bdg_lookup_fn_t func);
794
795#else /* !WITH_VALE */
796#define netmap_get_bdg_na(_1, _2, _3) 0
797#define netmap_init_bridges(_1)
798#define netmap_bdg_ctl(_1, _2) EINVAL
799#endif /* !WITH_VALE */
800
801/* Various prototypes */
802int netmap_poll(struct cdev *dev, int events, struct thread *td);
803
804
805int netmap_init(void);
806void netmap_fini(void);
807int netmap_get_memory(struct netmap_priv_d* p);
808void netmap_dtor(void *data);
809int netmap_dtor_locked(struct netmap_priv_d *priv);
810
811int netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td);
812
813/* netmap_adapter creation/destruction */
814#define NM_IFPNAME(ifp) ((ifp) ? (ifp)->if_xname : "zombie")
815#define NM_DEBUG_PUTGET 1
816
817#ifdef NM_DEBUG_PUTGET
818
819#define NM_DBG(f) __##f
820
821void __netmap_adapter_get(struct netmap_adapter *na);
822
823#define netmap_adapter_get(na) \
824 do { \
825 struct netmap_adapter *__na = na; \
826 D("getting %p:%s (%d)", __na, NM_IFPNAME(__na->ifp), __na->na_refcount); \
827 __netmap_adapter_get(__na); \
828 } while (0)
829
830int __netmap_adapter_put(struct netmap_adapter *na);
831
832#define netmap_adapter_put(na) \
833 do { \
834 struct netmap_adapter *__na = na; \
835 D("putting %p:%s (%d)", __na, NM_IFPNAME(__na->ifp), __na->na_refcount); \
836 __netmap_adapter_put(__na); \
837 } while (0)
838
839#else /* !NM_DEBUG_PUTGET */
840
841#define NM_DBG(f) f
842void netmap_adapter_get(struct netmap_adapter *na);
843int netmap_adapter_put(struct netmap_adapter *na);
844
845#endif /* !NM_DEBUG_PUTGET */
846
847
501extern u_int netmap_buf_size;
502#define NETMAP_BUF_SIZE netmap_buf_size // XXX remove
503extern int netmap_mitigate;
504extern int netmap_no_pendintr;
505extern u_int netmap_total_buffers;
506extern char *netmap_buffer_base;
507extern int netmap_verbose; // XXX debugging
508enum { /* verbose flags */
509 NM_VERB_ON = 1, /* generic verbose */
510 NM_VERB_HOST = 0x2, /* verbose host stack */
511 NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */
512 NM_VERB_TXSYNC = 0x20,
513 NM_VERB_RXINTR = 0x100, /* verbose on rx/tx intr (driver) */
514 NM_VERB_TXINTR = 0x200,
515 NM_VERB_NIC_RXSYNC = 0x1000, /* verbose on rx/tx intr (driver) */
516 NM_VERB_NIC_TXSYNC = 0x2000,
517};
518
848extern u_int netmap_buf_size;
849#define NETMAP_BUF_SIZE netmap_buf_size // XXX remove
850extern int netmap_mitigate;
851extern int netmap_no_pendintr;
852extern u_int netmap_total_buffers;
853extern char *netmap_buffer_base;
854extern int netmap_verbose; // XXX debugging
855enum { /* verbose flags */
856 NM_VERB_ON = 1, /* generic verbose */
857 NM_VERB_HOST = 0x2, /* verbose host stack */
858 NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */
859 NM_VERB_TXSYNC = 0x20,
860 NM_VERB_RXINTR = 0x100, /* verbose on rx/tx intr (driver) */
861 NM_VERB_TXINTR = 0x200,
862 NM_VERB_NIC_RXSYNC = 0x1000, /* verbose on rx/tx intr (driver) */
863 NM_VERB_NIC_TXSYNC = 0x2000,
864};
865
866extern int netmap_txsync_retry;
867extern int netmap_generic_mit;
868extern int netmap_generic_ringsize;
869
519/*
520 * NA returns a pointer to the struct netmap_adapter from the ifp,
521 * WNA is used to write it.
870/*
871 * NA returns a pointer to the struct netmap_adapter from the ifp,
872 * WNA is used to write it.
522 * SWNA() is used for the "host stack" endpoint associated
523 * to an interface. It is allocated together with the main NA(),
524 * as an array of two objects.
525 */
526#ifndef WNA
527#define WNA(_ifp) (_ifp)->if_pspare[0]
528#endif
529#define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp))
873 */
874#ifndef WNA
875#define WNA(_ifp) (_ifp)->if_pspare[0]
876#endif
877#define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp))
530#define SWNA(_ifp) (NA(_ifp) + 1)
531
532/*
533 * Macros to determine if an interface is netmap capable or netmap enabled.
534 * See the magic field in struct netmap_adapter.
535 */
536#ifdef __FreeBSD__
537/*
538 * on FreeBSD just use if_capabilities and if_capenable.

--- 17 unchanged lines hidden ---

556 ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC )
557
558#define NETMAP_SET_CAPABLE(ifp) \
559 NA(ifp)->magic = ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC
560
561#endif /* linux */
562
563#ifdef __FreeBSD__
878
879/*
880 * Macros to determine if an interface is netmap capable or netmap enabled.
881 * See the magic field in struct netmap_adapter.
882 */
883#ifdef __FreeBSD__
884/*
885 * on FreeBSD just use if_capabilities and if_capenable.

--- 17 unchanged lines hidden ---

903 ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC )
904
905#define NETMAP_SET_CAPABLE(ifp) \
906 NA(ifp)->magic = ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC
907
908#endif /* linux */
909
910#ifdef __FreeBSD__
911
564/* Callback invoked by the dma machinery after a successful dmamap_load */
565static void netmap_dmamap_cb(__unused void *arg,
566 __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
567{
568}
569
570/* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
571 * XXX can we do it without a callback ?

--- 11 unchanged lines hidden ---

583netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
584{
585 if (map) {
586 bus_dmamap_unload(tag, map);
587 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
588 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
589 }
590}
912/* Callback invoked by the dma machinery after a successful dmamap_load */
913static void netmap_dmamap_cb(__unused void *arg,
914 __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
915{
916}
917
918/* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
919 * XXX can we do it without a callback ?

--- 11 unchanged lines hidden ---

931netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
932{
933 if (map) {
934 bus_dmamap_unload(tag, map);
935 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
936 netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
937 }
938}
939
591#else /* linux */
592
593/*
594 * XXX How do we redefine these functions:
595 *
596 * on linux we need
597 * dma_map_single(&pdev->dev, virt_addr, len, direction)
598 * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction)

--- 91 unchanged lines hidden ---

690{
691 uint32_t i = slot->buf_idx;
692 void *ret = (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);
693
694 *pp = (i >= netmap_total_buffers) ? NMB_PA(0) : NMB_PA(i);
695 return ret;
696}
697
940#else /* linux */
941
942/*
943 * XXX How do we redefine these functions:
944 *
945 * on linux we need
946 * dma_map_single(&pdev->dev, virt_addr, len, direction)
947 * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction)

--- 91 unchanged lines hidden ---

1039{
1040 uint32_t i = slot->buf_idx;
1041 void *ret = (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);
1042
1043 *pp = (i >= netmap_total_buffers) ? NMB_PA(0) : NMB_PA(i);
1044 return ret;
1045}
1046
1047/* Generic version of NMB, which uses device-specific memory. */
1048static inline void *
1049BDG_NMB(struct netmap_adapter *na, struct netmap_slot *slot)
1050{
1051 struct lut_entry *lut = na->na_lut;
1052 uint32_t i = slot->buf_idx;
1053 return (unlikely(i >= na->na_lut_objtotal)) ?
1054 lut[0].vaddr : lut[i].vaddr;
1055}
1056
698/* default functions to handle rx/tx interrupts */
699int netmap_rx_irq(struct ifnet *, u_int, u_int *);
700#define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
1057/* default functions to handle rx/tx interrupts */
1058int netmap_rx_irq(struct ifnet *, u_int, u_int *);
1059#define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
1060void netmap_common_irq(struct ifnet *, u_int, u_int *work_done);
701
1061
702#ifdef __FreeBSD__
703MALLOC_DECLARE(M_NETMAP);
704#endif /* __FreeBSD__ */
705
1062
706
1063void netmap_txsync_to_host(struct netmap_adapter *na);
707void netmap_disable_all_rings(struct ifnet *);
708void netmap_enable_all_rings(struct ifnet *);
1064void netmap_disable_all_rings(struct ifnet *);
1065void netmap_enable_all_rings(struct ifnet *);
1066void netmap_disable_ring(struct netmap_kring *kr);
709
1067
1068
1069/* Structure associated to each thread which registered an interface.
1070 *
1071 * The first 4 fields of this structure are written by NIOCREGIF and
1072 * read by poll() and NIOC?XSYNC.
1073 * There is low contention among writers (actually, a correct user program
1074 * should have no contention among writers) and among writers and readers,
1075 * so we use a single global lock to protect the structure initialization.
1076 * Since initialization involves the allocation of memory, we reuse the memory
1077 * allocator lock.
1078 * Read access to the structure is lock free. Readers must check that
1079 * np_nifp is not NULL before using the other fields.
1080 * If np_nifp is NULL initialization has not been performed, so they should
1081 * return an error to userlevel.
1082 *
1083 * The ref_done field is used to regulate access to the refcount in the
1084 * memory allocator. The refcount must be incremented at most once for
1085 * each open("/dev/netmap"). The increment is performed by the first
1086 * function that calls netmap_get_memory() (currently called by
1087 * mmap(), NIOCGINFO and NIOCREGIF).
1088 * If the refcount is incremented, it is then decremented when the
1089 * private structure is destroyed.
1090 */
1091struct netmap_priv_d {
1092 struct netmap_if * volatile np_nifp; /* netmap if descriptor. */
1093
1094 struct netmap_adapter *np_na;
1095 int np_ringid; /* from the ioctl */
1096 u_int np_qfirst, np_qlast; /* range of rings to scan */
1097 uint16_t np_txpoll;
1098
1099 struct netmap_mem_d *np_mref; /* use with NMG_LOCK held */
1100 /* np_refcount is only used on FreeBSD */
1101 int np_refcount; /* use with NMG_LOCK held */
1102};
1103
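/*
 * (Illustrative sketch.) The lock-free reader rule stated above:
 * validate np_nifp before trusting any other field. my_priv_ready()
 * is hypothetical:
 */
static inline int
my_priv_ready(struct netmap_priv_d *priv)
{
	if (priv->np_nifp == NULL)	/* NIOCREGIF not completed yet */
		return (ENXIO);
	return (0);
}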
1104
1105/*
1106 * generic netmap emulation for devices that do not have
1107 * native netmap support.
1108 * XXX generic_netmap_register() is only exported to implement
1109 * nma_is_generic().
1110 */
1111int generic_netmap_register(struct netmap_adapter *na, int enable);
1112int generic_netmap_attach(struct ifnet *ifp);
1113
1114int netmap_catch_rx(struct netmap_adapter *na, int intercept);
1115void generic_rx_handler(struct ifnet *ifp, struct mbuf *m);
1116void netmap_catch_packet_steering(struct netmap_generic_adapter *na, int enable);
1117int generic_xmit_frame(struct ifnet *ifp, struct mbuf *m, void *addr, u_int len, u_int ring_nr);
1118int generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx);
1119void generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq);
1120
1121static __inline int
1122nma_is_generic(struct netmap_adapter *na)
1123{
1124 return na->nm_register == generic_netmap_register;
1125}
1126
1127/*
1128 * netmap_mitigation API. This is used by the generic adapter
1129 * to reduce the number of interrupt requests/selwakeup
1130 * to clients on incoming packets.
1131 */
1132void netmap_mitigation_init(struct netmap_generic_adapter *na);
1133void netmap_mitigation_start(struct netmap_generic_adapter *na);
1134void netmap_mitigation_restart(struct netmap_generic_adapter *na);
1135int netmap_mitigation_active(struct netmap_generic_adapter *na);
1136void netmap_mitigation_cleanup(struct netmap_generic_adapter *na);
1137
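/*
 * (Illustrative sketch.) How the mitigation API above could be driven
 * from the generic rx path: notify immediately when idle, otherwise
 * record a pending event for the timer. my_generic_rx_event() and the
 * exact sequencing are assumptions:
 */
static void
my_generic_rx_event(struct netmap_generic_adapter *gna)
{
	if (netmap_mitigation_active(gna)) {
		gna->mit_pending = 1;	/* the timer will deliver the wakeup */
	} else {
		gna->up.up.nm_notify(&gna->up.up, 0, NR_RX, 0);
		netmap_mitigation_start(gna);
	}
}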
1138// int generic_timer_handler(struct hrtimer *t);
1139
710#endif /* _NET_NETMAP_KERN_H_ */
1140#endif /* _NET_NETMAP_KERN_H_ */