2,3c2
< * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
< * Copyright (C) 2013 Universita` di Pisa
---
> * Copyright (C) 2011-2014 Universita` di Pisa. All rights reserved.
29c28
< * $FreeBSD: head/sys/net/netmap_user.h 259412 2013-12-15 08:37:24Z luigi $
---
> * $FreeBSD: head/sys/net/netmap_user.h 260368 2014-01-06 12:53:15Z luigi $
31,32c30,31
< * This header contains the macros used to manipulate netmap structures
< * and packets in userspace. See netmap(4) for more information.
---
> * Functions and macros to manipulate netmap structures and packets
> * in userspace. See netmap(4) for more information.
47c46
< * directly plen, flags, bufindex)
---
> * directly len, flags, buf_idx)
52,53c51,53
< * Since rings are circular, we have macros to compute the next index
< * i = NETMAP_RING_NEXT(ring, i);
---
> * All ring indexes (head, cur, tail) should always move forward.
> * To compute the next index in a circular ring you can use
> * i = nm_ring_next(ring, i);
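A minimal sketch of the forward-only traversal this implies, for one receive ring (assumptions of this example: nifp is the struct netmap_if pointer of an already-open port, the ring has been refilled by a previous poll()/ioctl(), and consume() is a hypothetical callback):

    struct netmap_ring *rxring = NETMAP_RXRING(nifp, 0);
    uint32_t i = rxring->cur;

    while (i != rxring->tail) {             /* slots [cur .. tail) hold new packets */
            struct netmap_slot *slot = &rxring->slot[i];

            consume(NETMAP_BUF(rxring, slot->buf_idx), slot->len);
            i = nm_ring_next(rxring, i);    /* indexes only move forward */
    }
    rxring->head = rxring->cur = i;         /* give the slots back to the kernel */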
56,57c56,59
< * that can be called to open, close and read from netmap in a way
< * similar to libpcap.
---
> * that can be called to open, close, read and write on netmap in a way
> * similar to libpcap. Note that the read/write functions depend on
> * an ioctl()/select()/poll() being issued to refill rings or push
> * packets out.
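To make that dependency concrete, a sketch of the system calls involved (fd is the file descriptor on which the port was registered; NIOCRXSYNC/NIOCTXSYNC are the netmap sync ioctls from <net/netmap.h>; <sys/ioctl.h> and <poll.h> are needed):

    ioctl(fd, NIOCRXSYNC, NULL);    /* ask the kernel to refill the rx rings */
    /* ... read slots and advance head/cur ... */

    /* ... fill tx slots and advance head/cur, then: */
    ioctl(fd, NIOCTXSYNC, NULL);    /* push the queued packets out */

    /* or block until either direction can make progress */
    struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
    poll(&pfd, 1, -1);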
67a70,75
>
> #ifndef likely
> #define likely(x) __builtin_expect(!!(x), 1)
> #define unlikely(x) __builtin_expect(!!(x), 0)
> #endif /* likely and unlikely */
>
69a78
> /* helper macro */
73c82
< #define NETMAP_IF(b, o) _NETMAP_OFFSET(struct netmap_if *, b, o)
---
> #define NETMAP_IF(_base, _ofs) _NETMAP_OFFSET(struct netmap_if *, _base, _ofs)
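For reference, a minimal sketch of how these offset macros are typically used right after NIOCREGIF and mmap() (mem is the mapped netmap region and req the struct nmreq filled in by the ioctl; both are assumptions of this example):

    struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
    struct netmap_ring *txring = NETMAP_TXRING(nifp, 0);   /* first hw tx ring */
    struct netmap_ring *rxring = NETMAP_RXRING(nifp, 0);   /* first hw rx ring */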
88,89d96
< #define NETMAP_RING_NEXT(r, i) \
< ((i)+1 == (r)->num_slots ? 0 : (i) + 1 )
91,94c98,102
< #define NETMAP_RING_FIRST_RESERVED(r) \
< 	( (r)->cur < (r)->reserved ? \
< 	  (r)->cur + (r)->num_slots - (r)->reserved : \
< 	  (r)->cur - (r)->reserved )
---
> static inline uint32_t
> nm_ring_next(struct netmap_ring *r, uint32_t i)
> {
> 	return ( unlikely(i + 1 == r->num_slots) ? 0 : i + 1);
> }
95a104
>
97c106,107
< * Return 1 if the given tx ring is empty.
---
> * Return 1 if we have pending transmissions in the tx ring.
> * When everything is complete ring->cur = ring->tail + 1 (modulo ring size)
99c109,113
< #define NETMAP_TX_RING_EMPTY(r) ((r)->avail >= (r)->num_slots - 1)
---
> static inline int
> nm_tx_pending(struct netmap_ring *r)
> {
> 	return nm_ring_next(r, r->tail) != r->cur;
> }
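For example, a transmitter that must not close the port before the NIC has drained its queue could spin on nm_tx_pending() (sketch; txring and fd refer to an already-open port and are assumptions of this example):

    while (nm_tx_pending(txring)) {
            ioctl(fd, NIOCTXSYNC, NULL);    /* let the kernel reclaim completed slots */
            usleep(100);                    /* arbitrary pacing, not required */
    }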
100a115,125
>
> static inline uint32_t
> nm_ring_space(struct netmap_ring *ring)
> {
> 	int ret = ring->tail - ring->cur;
> 	if (ret < 0)
> 		ret += ring->num_slots;
> 	return ret;
> }
>
>
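As a worked example of the wrap-around handling above: on a ring with num_slots = 1024, cur = 1000 and tail = 10, tail - cur is -990, so nm_ring_space() returns -990 + 1024 = 34, i.e. the application may still use 34 slots before catching up with the kernel.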
116c141,146
< #include <malloc.h>
---
> #include <unistd.h> /* close() */
> #ifdef __FreeBSD__
> #include <stdlib.h>
> #else
> #include <malloc.h> /* on FreeBSD it is stdlib.h */
> #endif
141a172
>
142a174,203
> * this is a slightly optimized copy routine which rounds
> * to multiple of 64 bytes and is often faster than dealing
> * with other odd sizes. We assume there is enough room
> * in the source and destination buffers.
> *
> * XXX only for multiples of 64 bytes, non overlapped.
> */
> static inline void
> pkt_copy(const void *_src, void *_dst, int l)
> {
> 	const uint64_t *src = _src;
> 	uint64_t *dst = _dst;
> 	if (unlikely(l >= 1024)) {
> 		memcpy(dst, src, l);
> 		return;
> 	}
> 	for (; likely(l > 0); l-=64) {
> 		*dst++ = *src++;
> 		*dst++ = *src++;
> 		*dst++ = *src++;
> 		*dst++ = *src++;
> 		*dst++ = *src++;
> 		*dst++ = *src++;
> 		*dst++ = *src++;
> 		*dst++ = *src++;
> 	}
> }
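A hedged usage sketch: since the loop copies 64 bytes per iteration, pkt_copy() may read and write up to the next multiple of 64 past the given length, which is safe when both source and destination are netmap buffers (typically 2048 bytes). frame and frame_len are assumptions of this example:

    struct netmap_slot *slot = &txring->slot[txring->cur];
    char *dst = NETMAP_BUF(txring, slot->buf_idx);

    slot->len = frame_len;              /* the NIC uses the real length */
    pkt_copy(frame, dst, frame_len);    /* may touch up to 63 bytes more */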
>
>
> /*
148,149c209,217
< * The open routine accepts an ifname (netmap:foo or vale:foo) and
< * optionally a second (string) argument indicating the ring number
---
> *--- the pcap-like API ---
> *
> * nm_open() opens a file descriptor, binds to a port and maps memory.
> *
> * ifname      (netmap:foo or vale:foo) is the port name
> * flags       can be NETMAP_SW_RING or NETMAP_HW_RING etc.
> * ring_no     only used if NETMAP_HW_RING is specified; interpreted
> *             as a string or integer indicating the ring number
> * ring_flags  is copied into the flags field of every ring (e.g. for transparent mode)
151a220
>
156,157c225
< * nm_dispatch() is the same as pcap_dispatch()
< * nm_next() is the same as pcap_next()
---
> * nm_close() closes and restores the port to its previous state
159,160d226
< static int nm_dispatch(struct nm_desc_t *, int, nm_cb_t, u_char *);
< static u_char *nm_next(struct nm_desc_t *, struct nm_hdr_t *);
161a228,229
> static int nm_close(struct nm_desc_t *);
>
163c231,233
< * unmap memory, close file descriptor and free the descriptor.
---
> * nm_inject() is the same as pcap_inject()
> * nm_dispatch() is the same as pcap_dispatch()
> * nm_nextpkt() is the same as pcap_next()
165d234
< static int nm_close(struct nm_desc_t *);
166a236,238
> static int nm_inject(struct nm_desc_t *, const void *, size_t);
> static int nm_dispatch(struct nm_desc_t *, int, nm_cb_t, u_char *);
> static u_char *nm_nextpkt(struct nm_desc_t *, struct nm_hdr_t *);
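Putting the pcap-like calls together, a minimal echo sketch built only on the prototypes above (assumptions of this example: d was returned by nm_open() as described earlier, the descriptor exposes its file descriptor as d->fd, and nm_hdr_t carries a pcap-style len field; needs <poll.h> and <sys/ioctl.h>):

    static void
    echo_loop(struct nm_desc_t *d)
    {
            struct nm_hdr_t hdr;
            struct pollfd pfd = { .fd = d->fd, .events = POLLIN };
            u_char *buf;

            for (;;) {
                    poll(&pfd, 1, -1);              /* wait for incoming packets */
                    while ((buf = nm_nextpkt(d, &hdr)) != NULL) {
                            if (nm_inject(d, buf, hdr.len) == 0)
                                    break;          /* all tx rings currently full */
                    }
                    ioctl(d->fd, NIOCTXSYNC, NULL); /* flush the echoed frames */
            }
            /* not reached: nm_close(d); */
    }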
167a240
>
242a316,321
> /*
> * ugly trick to avoid unused warnings
> */
> static void *__xxzt[] __attribute__ ((unused)) =
> 	{ nm_open, nm_inject, nm_dispatch, nm_nextpkt } ;
>
255a335,371
> * Same prototype as pcap_inject(), only need to cast.
> */
> static int
> nm_inject(struct nm_desc_t *d, const void *buf, size_t size)
> {
> 	u_int c, n = d->last_ring - d->first_ring + 1;
>
> 	if (0) fprintf(stderr, "%s rings %d %d %d\n", __FUNCTION__,
> 		d->first_ring, d->cur_ring, d->last_ring);
> 	for (c = 0; c < n ; c++) {
> 		/* compute current ring to use */
> 		struct netmap_ring *ring;
> 		uint32_t i, idx;
> 		uint32_t ri = d->cur_ring + c;
>
> 		if (ri > d->last_ring)
> 			ri = d->first_ring;
> 		ring = NETMAP_TXRING(d->nifp, ri);
> 		if (nm_ring_empty(ring)) {
> 			if (0) fprintf(stderr, "%s ring %d cur %d tail %d\n",
> 				__FUNCTION__,
> 				ri, ring->cur, ring->tail);
> 			continue;
> 		}
> 		i = ring->cur;
> 		idx = ring->slot[i].buf_idx;
> 		ring->slot[i].len = size;
> 		pkt_copy(buf, NETMAP_BUF(ring, idx), size);
> 		d->cur_ring = ri;
> 		ring->head = ring->cur = nm_ring_next(ring, i);
> 		return size;
> 	}
> 	return 0; /* fail */
> }
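A follow-up note on using this: nm_inject() tries each TX ring once, starting from cur_ring, and returns 0 when they are all full, so a caller that must not drop the packet can wait for space and retry (sketch; d->fd as the registered file descriptor is an assumption, as above):

    while (nm_inject(d, buf, len) == 0) {
            struct pollfd pfd = { .fd = d->fd, .events = POLLOUT };

            poll(&pfd, 1, -1);      /* wakes up when some tx ring has room again */
    }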
>
>
> /*
258d373
< inline /* not really, but disable unused warnings */
279c394
< 	for ( ; ring->avail > 0 && cnt != got; got++) {
---
> 	for ( ; !nm_ring_empty(ring) && cnt != got; got++) {
288,289c403
< 		ring->cur = NETMAP_RING_NEXT(ring, i);
< 		ring->avail--;
---
> 		ring->head = ring->cur = nm_ring_next(ring, i);
296d409
< inline /* not really, but disable unused warnings */
298c411
< nm_next(struct nm_desc_t *d, struct nm_hdr_t *hdr)
---
> nm_nextpkt(struct nm_desc_t *d, struct nm_hdr_t *hdr)
305c418
< 		if (ring->avail > 0) {
---
> 		if (!nm_ring_empty(ring)) {
313,314c426,431
< 			ring->cur = NETMAP_RING_NEXT(ring, i);
< 			ring->avail--;
---
> 			ring->cur = nm_ring_next(ring, i);
> 			/* we could postpone advancing head if we want
> 			 * to hold the buffer. This can be supported in
> 			 * the future.
> 			 */
> 			ring->head = ring->cur;