/*
 * Copyright (C) 2014 Vincenzo Maffione, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: stable/10/sys/dev/netmap/if_vtnet_netmap.h 270252 2014-08-20 23:34:36Z luigi $
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>    /* vtophys ? */
#include <dev/netmap/netmap_kern.h>


#define SOFTC_T	vtnet_softc
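/* As in the other netmap per-driver headers, SOFTC_T lets the routines
 * below refer to the driver-private softc type by a common name.
 */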

/* Free all the unused buffers in all the RX and TX virtqueues.
 * This function is called when entering and exiting netmap mode.
 * - buffers queued by the native virtio driver carry an mbuf pointer
 *   as the token and need to be freed;
 * - buffers queued by netmap carry the txq/rxq pointer as the token
 *   and need no further work.
 */
static void
vtnet_netmap_free_bufs(struct SOFTC_T* sc)
{
	int i, nmb = 0, n = 0, last;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct virtqueue *vq;
		struct mbuf *m;
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct vtnet_tx_header *txhdr;

		last = 0;
		vq = rxq->vtnrx_vq;
		while ((m = virtqueue_drain(vq, &last)) != NULL) {
			n++;
			if (m != (void *)rxq)
				m_freem(m);
			else
				nmb++;
		}

		last = 0;
		vq = txq->vtntx_vq;
		while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
			n++;
			if (txhdr != (void *)txq) {
				m_freem(txhdr->vth_mbuf);
				uma_zfree(vtnet_tx_header_zone, txhdr);
			} else
				nmb++;
		}
	}
	D("freed %d mbufs, %d netmap bufs on %d queues",
		n - nmb, nmb, i);
}

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct SOFTC_T *sc = ifp->if_softc;

	VTNET_CORE_LOCK(sc);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	/* drain queues so netmap and native drivers
	 * do not interfere with each other
	 */
	vtnet_netmap_free_bufs(sc);
	vtnet_init_locked(sc);	/* also enable intr */
	VTNET_CORE_UNLOCK(sc);
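	/* Netmap treats a nonzero return as a failed (un)registration,
	 * so report failure if the interface did not come back up.
	 */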
	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}


/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct SOFTC_T *sc = ifp->if_softc;
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;

	/*
	 * First part: process new packets to send.
	 */
	rmb();
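	/* The read barrier should pair with the userspace store that
	 * published ring->head, so the slot contents we read below are
	 * at least as fresh as the head index itself.
	 */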

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			/* we use a static, all-zero header: the device
			 * always expects one in front of each frame,
			 * but netmap does not use any of its fields
			 */
			static struct virtio_net_hdr_mrg_rxbuf hdr;
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int err;

			NM_CHECK_ADDR_LEN(na, addr, len);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist, expose it to the hypervisor,
			 * and kick the hypervisor (if necessary).
			 */
			sglist_reset(sg); /* cheap */
			/* if vtnet_hdr_size > 0 ... */
			err = sglist_append(sg, &hdr, sc->vtnet_hdr_size);
			/* XXX later, support multi segment */
			err = sglist_append_phys(sg, paddr, len);
			/* use txq as the cookie */
			err = virtqueue_enqueue(vq, txq, sg, sg->sg_nseg, 0);
			if (unlikely(err < 0)) {
				D("virtqueue_enqueue failed");
				break;
			}
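			/* The sglist_append*() return values above are
			 * ignored: with just two short segments the
			 * driver-allocated sglist is assumed to be large
			 * enough (vtntx_sg is sized by if_vtnet.c for the
			 * worst-case segment count).
			 */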

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		/* Update hwcur depending on where we stopped. */
		kring->nr_hwcur = nm_i; /* note we might break early */

		/* No more free TX slots? Ask the hypervisor for notifications,
		 * possibly only when a considerable amount of work has been
		 * done.
		 */
		ND(3, "sent %d packets, hwcur %d", n, nm_i);
		virtqueue_disable_intr(vq);
		virtqueue_notify(vq);
	} else {
		if (ring->head != ring->tail)
			ND(5, "pure notify ? head %d tail %d nused %d %d",
			    ring->head, ring->tail, virtqueue_nused(vq),
			    (virtqueue_dump(vq), 1));
		virtqueue_notify(vq);
		virtqueue_enable_intr(vq); /* like postpone with 0 */
	}


	/* Free used slots. We only consider our own used buffers, recognized
	 * by the token (txq) we passed to virtqueue_enqueue().
	 */
	n = 0;
	for (;;) {
		struct vtnet_tx_header *txhdr = virtqueue_dequeue(vq, NULL);
		if (txhdr == NULL)
			break;
		if (likely(txhdr == (void *)txq)) {
			n++;
			if (virtqueue_nused(vq) < 32) { /* XXX slow release */
				break;
			}
		} else { /* leftover from previous transmission */
			m_freem(txhdr->vth_mbuf);
			uma_zfree(vtnet_tx_header_zone, txhdr);
		}
	}
	if (n) {
		kring->nr_hwtail += n;
		if (kring->nr_hwtail > lim)
			kring->nr_hwtail -= lim + 1;
	}
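	/* Example: with 512 slots (lim == 511), hwtail == 510 and n == 3
	 * give 513, which wraps to 513 - 512 == 1.
	 */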
	if (nm_i != kring->nr_hwtail /* && vtnet_txq_below_threshold(txq) == 0 */) {
		ND(3, "disable intr, hwcur %d", nm_i);
		virtqueue_disable_intr(vq);
	} else {
		ND(3, "enable intr, hwcur %d", nm_i);
		virtqueue_postpone_intr(vq, VQ_POSTPONE_SHORT);
	}

	nm_txsync_finalize(kring);

	return 0;
}

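/* Fill the RX virtqueue with netmap buffers from nm_i up to head
 * (excluded). Returns the first index that was not posted to the
 * virtqueue, or -1 if a bad buffer forced a ring reinit that failed.
 */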
static int
vtnet_refill_rxq(struct netmap_kring *kring, u_int nm_i, u_int head)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int n;

	/* device-specific */
	struct SOFTC_T *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* use a local sglist, default might be short */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };
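	/* Two segments per buffer are enough here: one for the virtio-net
	 * header and one for the netmap buffer itself; a stack-allocated
	 * sglist avoids a sglist_alloc() call in this path.
	 */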

	for (n = 0; nm_i != head; n++) {
		static struct virtio_net_hdr_mrg_rxbuf hdr;
		struct netmap_slot *slot = &ring->slot[nm_i];
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);
		int err = 0;

		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			if (netmap_ring_reinit(kring))
				return -1;
		}

		slot->flags &= ~NS_BUF_CHANGED;
		sglist_reset(&sg); /* cheap */
		err = sglist_append(&sg, &hdr, sc->vtnet_hdr_size);
		err = sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
		/* all segments writable for the host */
		err = virtqueue_enqueue(vq, rxq, &sg, 0, sg.sg_nseg);
		if (err < 0) {
			D("virtqueue_enqueue failed");
			break;
		}
		nm_i = nm_next(nm_i, lim);
	}
	return nm_i;
}

/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = nm_rxsync_prologue(kring);
	int force_update = (flags & NAF_FORCE_READ) ||
		(kring->nr_kflags & NKR_PENDINTR);

	/* device-specific */
	struct SOFTC_T *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* XXX netif_carrier_ok ? */

	if (head > lim)
		return netmap_ring_reinit(kring);

	rmb();
	/*
	 * First part: import newly received packets.
	 * Only accept our own buffers (matching the token). We should
	 * only get matching buffers, because of vtnet_netmap_free_bufs()
	 * and vtnet_netmap_init_rx_buffers().
	 */
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;
		void *token;

		nm_i = kring->nr_hwtail;
		n = 0;
		for (;;) {
			uint32_t len;

			token = virtqueue_dequeue(vq, &len);
			if (token == NULL)
				break;
			if (likely(token == (void *)rxq)) {
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = slot_flags;
				nm_i = nm_next(nm_i, lim);
				n++;
			} else {
				D("This should not happen");
			}
		}
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	ND("[B] h %d c %d hwcur %d hwtail %d",
	    ring->head, ring->cur, kring->nr_hwcur, kring->nr_hwtail);

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur; /* netmap ring index */
	if (nm_i != head) {
		int err = vtnet_refill_rxq(kring, nm_i, head);

		if (err < 0)
			return 1;
		kring->nr_hwcur = err;
		virtqueue_notify(vq);
		/* The queue may have been drained, so ask the hypervisor
		 * for an interrupt when new packets arrive.
		 */
		vtnet_rxq_enable_intr(rxq);
	}

	/* tell userspace that there might be new packets. */
	nm_rxsync_finalize(kring);

	ND("[C] h %d c %d t %d hwcur %d hwtail %d",
	    ring->head, ring->cur, ring->tail,
	    kring->nr_hwcur, kring->nr_hwtail);

	return 0;
}


/* Make the RX virtqueues' buffers point to netmap buffers.
 * Returns 1 on success, 0 if netmap mode is off or initialization failed.
 */
static int
vtnet_netmap_init_rx_buffers(struct SOFTC_T *sc)
{
	struct ifnet *ifp = sc->vtnet_ifp;
	struct netmap_adapter *na = NA(ifp);
	unsigned int r;

	if (!nm_native_on(na))
		return 0;
	for (r = 0; r < na->num_rx_rings; r++) {
		struct netmap_kring *kring = &na->rx_rings[r];
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[r];
		struct virtqueue *vq = rxq->vtnrx_vq;
		struct netmap_slot *slot;
		int err = 0;

		slot = netmap_reset(na, NR_RX, r, 0);
		if (!slot) {
			D("strange, null netmap ring %d", r);
			return 0;
		}
		/* Add up to na->num_rx_desc - 1 buffers to this RX virtqueue.
		 * It's important to leave one virtqueue slot free, otherwise
		 * we can run into ring->cur/ring->tail wraparounds.
		 */
		err = vtnet_refill_rxq(kring, 0, na->num_rx_desc - 1);
		if (err < 0)
			return 0;
		virtqueue_notify(vq);
	}

	return 1;
}

/* Update the virtio-net device configurations. The number of queues
 * can change dynamically, e.g. with 'ethtool --set-channels $IFNAME
 * combined $N' (Linux). This is actually the only way virtio-net can
 * currently enable multiqueue mode.
 * XXX note that we seem to lose packets if the netmap ring has more
 * slots than the queue
 */
static int
vtnet_netmap_config(struct netmap_adapter *na, u_int *txr, u_int *txd,
						u_int *rxr, u_int *rxd)
{
	struct ifnet *ifp = na->ifp;
	struct SOFTC_T *sc = ifp->if_softc;

	*txr = *rxr = sc->vtnet_max_vq_pairs;
	*rxd = 512; /* XXX hardwired; could be sc->vtnet_rx_nmbufs */
	*txd = *rxd; /* XXX */
	D("vtnet config txq=%d, txd=%d rxq=%d, rxd=%d",
	    *txr, *txd, *rxr, *rxd);

	return 0;
}
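/* Note: the netmap core queries these values through nm_config()
 * (netmap_update_config()) when the adapter is attached or registered,
 * so they bound the ring sizes visible to userspace.
 */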

static void
vtnet_netmap_attach(struct SOFTC_T *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->vtnet_ifp;
	na.num_tx_desc = 1024; /* XXX hardwired; could be sc->vtnet_rx_nmbufs */
	na.num_rx_desc = 1024; /* XXX hardwired; could be sc->vtnet_rx_nmbufs */
	na.nm_register = vtnet_netmap_reg;
	na.nm_txsync = vtnet_netmap_txsync;
	na.nm_rxsync = vtnet_netmap_rxsync;
	na.nm_config = vtnet_netmap_config;
	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
	D("max rings %d", sc->vtnet_max_vq_pairs);
	netmap_attach(&na);

	D("virtio attached txq=%d, txd=%d rxq=%d, rxd=%d",
	    na.num_tx_rings, na.num_tx_desc,
	    na.num_rx_rings, na.num_rx_desc);
}
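
/* These hooks are meant to be used from if_vtnet.c. A minimal sketch of
 * the expected call sites, under #ifdef DEV_NETMAP (the surrounding
 * function names are illustrative, not verified against the driver):
 *
 *	attach path:      vtnet_netmap_attach(sc);
 *	detach path:      netmap_detach(sc->vtnet_ifp);
 *	RX ring populate: vtnet_netmap_init_rx_buffers(sc);
 */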
/* end of file */