/*
 * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: stable/11/sys/dev/netmap/if_vtnet_netmap.h 344658 2019-02-28 09:42:03Z vmaffione $
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>    /* vtophys */
#include <dev/netmap/netmap_kern.h>

/*
 * Return 1 if the queue identified by 't' and 'idx' is in netmap mode.
 */
static int
vtnet_netmap_queue_on(struct vtnet_softc *sc, enum txrx t, int idx)
{
	struct netmap_adapter *na = NA(sc->vtnet_ifp);

	if (!nm_native_on(na))
		return 0;

	if (t == NR_RX)
		return !!(idx < na->num_rx_rings &&
			na->rx_rings[idx]->nr_mode == NKR_NETMAP_ON);

	return !!(idx < na->num_tx_rings &&
		na->tx_rings[idx]->nr_mode == NKR_NETMAP_ON);
}
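
/*
 * Illustrative only: the driver datapath can consult the helper above
 * to skip mbuf processing while netmap owns a queue, along the lines
 * of:
 *
 *	if (vtnet_netmap_queue_on(sc, NR_RX, rxq->vtnrx_id))
 *		return;
 *
 * The real call sites live in if_vtnet.c; this is a sketch, not a
 * copy of that code.
 */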

static void
vtnet_free_used(struct virtqueue *vq, int netmap_bufs, enum txrx t, int idx)
{
	void *cookie;
	int deq = 0;

	while ((cookie = virtqueue_dequeue(vq, NULL)) != NULL) {
		if (netmap_bufs) {
			/* These are netmap buffers: there is nothing to do. */
		} else {
			/* These are mbufs that we need to free. */
			struct mbuf *m;

			if (t == NR_TX) {
				struct vtnet_tx_header *txhdr = cookie;
				m = txhdr->vth_mbuf;
				m_freem(m);
				uma_zfree(vtnet_tx_header_zone, txhdr);
			} else {
				m = cookie;
				m_freem(m);
			}
		}
		deq++;
	}

	if (deq)
		nm_prinf("%d sgs dequeued from %s-%d (netmap=%d)",
			 deq, nm_txrx2str(t), idx, netmap_bufs);
}

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int state)
{
	struct ifnet *ifp = na->ifp;
	struct vtnet_softc *sc = ifp->if_softc;
	int success;
	int i;

	/* Drain the taskqueues to make sure that there are no worker threads
	 * accessing the virtqueues. */
	vtnet_drain_taskqueues(sc);

	VTNET_CORE_LOCK(sc);

	/* We need nm_netmap_on() to return true when called by
	 * vtnet_init_locked() below. */
	if (state)
		nm_set_native_flags(na);

	/* We need to trigger a device reset in order to unexpose guest buffers
	 * published to the host. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	/* Get pending used buffers. The way they are freed depends on whether
	 * they are netmap buffers or mbufs. We can tell the two cases apart by
	 * looking at kring->nr_mode, before it is possibly updated in the loop
	 * below. */
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct netmap_kring *kring;

		VTNET_TXQ_LOCK(txq);
		kring = NMR(na, NR_TX)[i];
		vtnet_free_used(txq->vtntx_vq,
				kring->nr_mode == NKR_NETMAP_ON, NR_TX, i);
		VTNET_TXQ_UNLOCK(txq);

		VTNET_RXQ_LOCK(rxq);
		kring = NMR(na, NR_RX)[i];
		vtnet_free_used(rxq->vtnrx_vq,
				kring->nr_mode == NKR_NETMAP_ON, NR_RX, i);
		VTNET_RXQ_UNLOCK(rxq);
	}
	vtnet_init_locked(sc);
	success = (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 0 : ENXIO;

	if (state) {
		netmap_krings_mode_commit(na, state);
	} else {
		nm_clear_native_flags(na);
		netmap_krings_mode_commit(na, state);
	}

	VTNET_CORE_UNLOCK(sc);

	return success;
}


/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
	u_int n;

	/*
	 * First part: process new packets to send.
	 */
	rmb();

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
			/* We prepend an empty (all-zero) virtio-net header. */
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int err;

			NM_CHECK_ADDR_LEN(na, addr, len);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist, expose it to the hypervisor,
			 * and kick the hypervisor (if necessary).
			 */
			sglist_reset(sg);	/* cheap */
			err = sglist_append(sg, &txq->vtntx_shrhdr, sc->vtnet_hdr_size);
			err |= sglist_append_phys(sg, paddr, len);
			KASSERT(err == 0, ("%s: cannot append to sglist %d",
						__func__, err));
			err = virtqueue_enqueue(vq, /*cookie=*/txq, sg,
						/*readable=*/sg->sg_nseg,
						/*writeable=*/0);
			if (unlikely(err)) {
				if (err != ENOSPC)
					nm_prerr("virtqueue_enqueue(%s) failed: %d",
							kring->name, err);
				break;
			}
		}

		virtqueue_notify(vq);

		/* Update hwcur depending on where we stopped. */
		kring->nr_hwcur = nm_i; /* note we might break early */
	}

	/* Free used slots. We only consider our own used buffers, recognized
	 * by the token we passed to virtqueue_enqueue.
	 */
	n = 0;
	for (;;) {
		void *token = virtqueue_dequeue(vq, NULL);
		if (token == NULL)
			break;
		if (unlikely(token != (void *)txq))
			nm_prerr("BUG: TX token mismatch");
		else
			n++;
	}
	if (n > 0) {
		kring->nr_hwtail += n;
		if (kring->nr_hwtail > lim)
			kring->nr_hwtail -= lim + 1;
	}
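
	/*
	 * Illustrative arithmetic for the wrap above: with a 256-slot ring
	 * (lim == 255), nr_hwtail == 250 and n == 10 give 260, which wraps
	 * to 260 - 256 == 4, i.e. four slots past the ring start.
	 */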

	if (interrupts && virtqueue_nfree(vq) < 32)
		virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG);

	return 0;
}

static int
vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int nm_i, u_int head)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* use a local sglist, default might be short */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };

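	/*
	 * Each slot is published as a 2-element scatter-gather list:
	 * ss[0] covers the shared virtio-net header, ss[1] the netmap
	 * buffer that the host will write the received frame into.
	 */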
	for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
		struct netmap_slot *slot = &ring->slot[nm_i];
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);
		int err;

		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			if (netmap_ring_reinit(kring))
				return -1;
		}

		slot->flags &= ~NS_BUF_CHANGED;
		sglist_reset(&sg);
		err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size);
		err |= sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
		KASSERT(err == 0, ("%s: cannot append to sglist %d",
					__func__, err));
		/* writable for the host */
		err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg,
				/*readable=*/0, /*writeable=*/sg.sg_nseg);
		if (unlikely(err)) {
			if (err != ENOSPC)
				nm_prerr("virtqueue_enqueue(%s) failed: %d",
					kring->name, err);
			break;
		}
	}

	return nm_i;
}

/*
 * Publish netmap buffers on a RX virtqueue.
 * Return -1 if this virtqueue is not being opened in netmap mode.
 * Otherwise return 0 on success or a positive error code on failure.
 */
static int
vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq)
{
	struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp);
	struct netmap_kring *kring;
	int error;

	if (!nm_native_on(na) || rxq->vtnrx_id >= na->num_rx_rings)
		return -1;

	kring = na->rx_rings[rxq->vtnrx_id];
	if (!(nm_kring_pending_on(kring) ||
			kring->nr_pending_mode == NKR_NETMAP_ON))
		return -1;

	/* Expose all the RX netmap buffers. Note that the number of
	 * netmap slots in the RX ring matches the maximum number of
	 * 2-element sglists that the RX virtqueue can accommodate. */
	error = vtnet_netmap_kring_refill(kring, 0, na->num_rx_desc);
	virtqueue_notify(rxq->vtnrx_vq);

	return error < 0 ? ENXIO : 0;
}
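
/*
 * For illustration only: the expected call flow has the RX queue setup
 * path in if_vtnet.c try netmap first and fall back to mbufs, roughly:
 *
 *	if (vtnet_netmap_rxq_populate(rxq) == 0)
 *		return 0;
 *
 * A -1 return means the queue stays in normal (mbuf) mode; this is a
 * sketch, not a copy of the driver code.
 */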

/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
				(kring->nr_kflags & NKR_PENDINTR);
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	rmb();
	/*
	 * First part: import newly received packets.
	 * Only accept our own buffers (i.e. those matching the token); we
	 * should never see anything else. We may need to stop early to
	 * prevent hwtail from overrunning hwcur.
	 */
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
		void *token;

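		/*
		 * Worked example for the bound above: with lim == 255 and
		 * nr_hwcur == 10, hwtail_lim == 9, so the dequeue loop
		 * stops once nm_i reaches 9 and one slot always separates
		 * hwtail from hwcur.
		 */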
		vtnet_rxq_disable_intr(rxq);

		nm_i = kring->nr_hwtail;
		while (nm_i != hwtail_lim) {
			int len;
			token = virtqueue_dequeue(vq, &len);
			if (token == NULL) {
				if (interrupts && vtnet_rxq_enable_intr(rxq)) {
					vtnet_rxq_disable_intr(rxq);
					continue;
				}
				break;
			}
			if (unlikely(token != (void *)rxq)) {
				nm_prerr("BUG: RX token mismatch");
			} else {
				/* Skip the virtio-net header. */
				len -= sc->vtnet_hdr_size;
				if (unlikely(len < 0)) {
					nm_prlim(1, "Truncated virtio-net header, "
						"missing %d bytes", -len);
					len = 0;
				}
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = 0;
				nm_i = nm_next(nm_i, lim);
			}
		}
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	nm_prdis("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur,
				kring->nr_hwcur, kring->nr_hwtail);

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur; /* netmap ring index */
	if (nm_i != head) {
		int nm_j = vtnet_netmap_kring_refill(kring, nm_i, head);
		if (nm_j < 0)
			return nm_j;
		kring->nr_hwcur = nm_j;
		virtqueue_notify(vq);
	}

	nm_prdis("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur,
		ring->tail, kring->nr_hwcur, kring->nr_hwtail);

	return 0;
}


/* Enable/disable interrupts on all virtqueues. */
static void
vtnet_netmap_intr(struct netmap_adapter *na, int state)
{
	struct vtnet_softc *sc = na->ifp->if_softc;
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct virtqueue *txvq = txq->vtntx_vq;

		if (state) {
			vtnet_rxq_enable_intr(rxq);
			virtqueue_enable_intr(txvq);
		} else {
			vtnet_rxq_disable_intr(rxq);
			virtqueue_disable_intr(txvq);
		}
	}
}

static int
vtnet_netmap_tx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * transmitted, so each call to virtqueue_enqueue() passes a sglist
	 * with 2 elements.
	 * TX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_tx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each TX sglist. Without them, we need two separate virtio descriptors
	 * for each TX sglist. We therefore compute the number of netmap TX
	 * slots according to these assumptions.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div;
}
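
/*
 * Worked example for the division above (virtqueue sizes are
 * negotiated with the host; 256 is only illustrative): with a
 * 256-entry TX virtqueue and indirect descriptors, every 2-element
 * sglist costs one descriptor, so the netmap TX ring gets 256 slots;
 * without indirect descriptors each sglist costs two descriptors and
 * the ring shrinks to 256 / 2 == 128 slots.
 */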

static int
vtnet_netmap_rx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * received, so each call to virtqueue_enqueue() passes a sglist
	 * with 2 elements.
	 * RX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_rx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each RX sglist. Without them, we need two separate virtio descriptors
	 * for each RX sglist. We therefore compute the number of netmap RX
	 * slots according to these assumptions.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div;
}

static int
vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct vtnet_softc *sc = na->ifp->if_softc;

	info->num_tx_rings = sc->vtnet_act_vq_pairs;
	info->num_rx_rings = sc->vtnet_act_vq_pairs;
	info->num_tx_descs = vtnet_netmap_tx_slots(sc);
	info->num_rx_descs = vtnet_netmap_rx_slots(sc);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	return 0;
}
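
/*
 * Netmap invokes the nm_config callback above when it (re)creates the
 * krings, so a change in the number of active virtqueue pairs is
 * reflected in the ring geometry before the rings are exposed to
 * userspace.
 */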

static void
vtnet_netmap_attach(struct vtnet_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->vtnet_ifp;
	na.na_flags = 0;
	na.num_tx_desc = vtnet_netmap_tx_slots(sc);
	na.num_rx_desc = vtnet_netmap_rx_slots(sc);
	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
	na.rx_buf_maxsize = 0;
	na.nm_register = vtnet_netmap_reg;
	na.nm_txsync = vtnet_netmap_txsync;
	na.nm_rxsync = vtnet_netmap_rxsync;
	na.nm_intr = vtnet_netmap_intr;
	na.nm_config = vtnet_netmap_config;

	netmap_attach(&na);

	nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d",
			na.num_tx_rings, na.num_tx_desc,
			na.num_rx_rings, na.num_rx_desc);
}
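
/*
 * Sketch of the expected attach/detach pairing, for illustration only
 * (the real hooks live in if_vtnet.c):
 *
 *	vtnet_netmap_attach(sc);	at the end of device attach
 *	...
 *	netmap_detach(sc->vtnet_ifp);	from device detach
 */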
/* end of file */