/*
 * Copyright (C) 2019 Universita` di Pisa.
 * Sponsored by Sunny Valley Networks.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD: stable/11/sys/dev/vmware/vmxnet3/vmx_netmap.h 344272 2019-02-19 10:07:48Z vmaffione $ */

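/*
 * netmap support for the vmxnet3 driver. Native mode is implemented by
 * the nm_register/nm_txsync/nm_rxsync callbacks registered in
 * vmxnet3_netmap_attach() below, plus ring-initialization helpers that
 * the driver uses when it (re)initializes its TX/RX rings in netmap mode.
 */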
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

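/*
 * Register/unregister callback (nm_register): stop the device, set or
 * clear the netmap native flags, and reinitialize it so that the data
 * path comes back up in the requested mode.
 */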
static int
vmxnet3_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct vmxnet3_softc *sc = ifp->if_softc;

	VMXNET3_CORE_LOCK(sc);
	vmxnet3_stop(sc);
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	vmxnet3_init_locked(sc);
	VMXNET3_CORE_UNLOCK(sc);
	return 0;
}

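/*
 * Fill an RX command ring with netmap buffers: each descriptor is pointed
 * at the physical address of its netmap buffer and posted as a HEAD buffer
 * of NETMAP_BUF_SIZE bytes. All descriptors except the last are handed to
 * the device by setting their generation bit to the ring's current
 * generation; the last one keeps the inverted generation, and the fill
 * index is left pointing at it.
 */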
static void
vmxnet3_netmap_rxq_init(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq,
		struct vmxnet3_rxring *rxr, struct netmap_slot *slot)
{
	struct ifnet *ifp = sc->vmx_ifp;
	struct netmap_adapter *na = NA(ifp);
	struct vmxnet3_rxdesc *rxd;
	int q, i;

	q = rxq - sc->vmx_rxq;

	for (i = 0; ; i++) {
		int idx = rxr->vxrxr_fill;
		int si = netmap_idx_n2k(na->rx_rings[q], idx);
		struct vmxnet3_rxbuf *rxb = &rxr->vxrxr_rxbuf[idx];
		uint64_t paddr;
		void *addr;

		addr = PNMB(na, slot + si, &paddr);
		netmap_load_map(na, rxr->vxrxr_rxtag, rxb->vrxb_dmamap, addr);

		rxd = &rxr->vxrxr_rxd[idx];
		rxd->addr = paddr;
		rxd->len = NETMAP_BUF_SIZE(na);
		rxd->gen = rxr->vxrxr_gen ^ 1;
		rxd->btype = VMXNET3_BTYPE_HEAD;
		nm_prdis("%d: addr %lx len %u btype %u gen %u",
			idx, rxd->addr, rxd->len, rxd->btype, rxd->gen);

		if (i == rxr->vxrxr_ndesc - 1)
			break;

		rxd->gen ^= 1;
		vmxnet3_rxr_increment_fill(rxr);
	}
}

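/*
 * Prepare a TX command ring for netmap mode. netmap_reset() returns the
 * slot array only if the ring is actually in netmap mode; each descriptor
 * is then cleared and pre-loaded with the physical address of its netmap
 * buffer, with the generation bit inverted so that no descriptor is owned
 * by the device yet.
 */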
static void
vmxnet3_netmap_txq_init(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
{
	struct ifnet *ifp = sc->vmx_ifp;
	struct netmap_adapter *na;
	struct netmap_slot *slot;
	struct vmxnet3_txring *txr;
	int i, gen, q;

	q = txq - sc->vmx_txq;

	na = NA(ifp);

	slot = netmap_reset(na, NR_TX, q, 0);
	if (slot == NULL)
		return;

	txr = &txq->vxtxq_cmd_ring;
	gen = txr->vxtxr_gen ^ 1;

	for (i = 0; i < txr->vxtxr_ndesc; i++) {
		int si = netmap_idx_n2k(na->tx_rings[q], i);
		struct vmxnet3_txdesc *txd = &txr->vxtxr_txd[i];
		uint64_t paddr;
		void *addr;

		addr = PNMB(na, slot + si, &paddr);

		txd->addr = paddr;
		txd->len = 0;
		txd->gen = gen;
		txd->dtype = 0;
		txd->offload_mode = VMXNET3_OM_NONE;
		txd->offload_pos = 0;
		txd->hlen = 0;
		txd->eop = 0;
		txd->compreq = 0;
		txd->vtag_mode = 0;
		txd->vtag = 0;

		netmap_load_map(na, txr->vxtxr_txtag,
				txr->vxtxr_txbuf[i].vtxb_dmamap, addr);
	}
}

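/*
 * Transmit-side synchronization (nm_txsync).
 *
 * First part: slots between nr_hwcur and rhead are mapped onto TX
 * descriptors, ownership is passed to the device by updating the
 * generation bit after a write barrier, and the TX head register is
 * written. Completion interrupts are requested only every half ring,
 * or when NS_REPORT is set on a slot.
 *
 * Second part: the TX completion ring is scanned to reclaim descriptors
 * already processed by the device, and nr_hwtail is advanced accordingly.
 */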
static int
vmxnet3_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;
	u_int nic_i;
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device specific */
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct vmxnet3_txqueue *txq = &sc->vmx_txq[kring->ring_id];
	struct vmxnet3_txring *txr = &txq->vxtxq_cmd_ring;
	struct vmxnet3_comp_ring *txc = &txq->vxtxq_comp_ring;
	struct vmxnet3_txcompdesc *txcd = txc->vxcr_u.txcd;
	int gen = txr->vxtxr_gen;

	/* no need to dma-sync the ring; memory barriers are sufficient */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int compreq = !!(slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency);

			/* device specific */
			struct vmxnet3_txdesc *curr = &txr->vxtxr_txd[nic_i];
			struct vmxnet3_txbuf *txbuf = &txr->vxtxr_txbuf[nic_i];

			NM_CHECK_ADDR_LEN(na, addr, len);

			/* fill the slot in the NIC ring */
			curr->len = len;
			curr->eop = 1; /* NS_MOREFRAG not supported */
			curr->compreq = compreq;

			if (slot->flags & NS_BUF_CHANGED) {
				curr->addr = paddr;
				netmap_reload_map(na, txr->vxtxr_txtag,
						txbuf->vtxb_dmamap, addr);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txr->vxtxr_txtag, txbuf->vtxb_dmamap,
					BUS_DMASYNC_PREWRITE);

			/* pass ownership */
			vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
			curr->gen = gen;

			nm_i = nm_next(nm_i, lim);
			nic_i++;
			if (unlikely(nic_i == lim + 1)) {
				nic_i = 0;
				gen = txr->vxtxr_gen ^= 1;
			}
		}

		vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id), nic_i);
	}
	kring->nr_hwcur = nm_i;

	/* reclaim completed packets */
	for (;;) {
		u_int sop;
		struct vmxnet3_txbuf *txb;

		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
		if (txcd->gen != txc->vxcr_gen)
			break;

		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++txc->vxcr_next == txc->vxcr_ndesc) {
			txc->vxcr_next = 0;
			txc->vxcr_gen ^= 1;
		}

		sop = txr->vxtxr_next;
		txb = &txr->vxtxr_txbuf[sop];

		bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
		    BUS_DMASYNC_POSTWRITE);

		txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
	}
	kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, txr->vxtxr_next), lim);

	return 0;
}

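/*
 * Receive-side synchronization (nm_rxsync).
 *
 * First part: harvest completed descriptors from the RX completion ring,
 * copy their lengths into the corresponding netmap slots and advance
 * nr_hwtail. Out-of-order completions are not handled: the skipped slots
 * are simply reported with len = 0.
 *
 * Second part: return the slots released by userspace (up to rhead) to
 * the RX command ring by setting the generation bit of the corresponding
 * descriptors, and notify the device through the RX head register when
 * it has asked for head updates.
 */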
static int
vmxnet3_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;
	u_int nic_i;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ);

	struct ifnet *ifp = na->ifp;
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[kring->ring_id];
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;

	if (head > lim)
		return netmap_ring_reinit(kring);

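	/*
	 * Only the first command ring is used: buffers are posted as single
	 * HEAD buffers of NETMAP_BUF_SIZE bytes, so the second (body-buffer)
	 * ring is not needed in netmap mode.
	 */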
	rxr = &rxq->vxrxq_cmd_ring[0];

	/* no need to dma-sync the ring; memory barriers are sufficient */

	/* first part: import newly received packets */
	if (netmap_no_pendintr || force_update) {
		rxc = &rxq->vxrxq_comp_ring;
		nm_i = kring->nr_hwtail;
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (;;) {
			struct vmxnet3_rxcompdesc *rxcd;
			struct vmxnet3_rxbuf *rxb;

			rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];

			if (rxcd->gen != rxc->vxcr_gen)
				break;
			vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

			while (__predict_false(rxcd->rxd_idx != nic_i)) {
				nm_prlim(1, "%u skipped! idx %u", nic_i, rxcd->rxd_idx);
				/* to shelter the application from this we
				 * would need to rotate the kernel-owned
				 * portion of the netmap and nic rings. We
				 * return len=0 for now and hope for the best.
				 */
				ring->slot[nm_i].len = 0;
				nic_i = nm_next(nic_i, lim);
				nm_i = nm_next(nm_i, lim);
			}

			rxb = &rxr->vxrxr_rxbuf[nic_i];

			ring->slot[nm_i].len = rxcd->len;
			ring->slot[nm_i].flags = 0;

			bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
					BUS_DMASYNC_POSTREAD);

			nic_i = nm_next(nic_i, lim);
			nm_i = nm_next(nm_i, lim);

			rxc->vxcr_next++;
			if (__predict_false(rxc->vxcr_next == rxc->vxcr_ndesc)) {
				rxc->vxcr_next = 0;
				rxc->vxcr_gen ^= 1;
			}
		}
		kring->nr_hwtail = nm_i;
	}
	/* second part: skip past packets that userspace has released */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		nic_i = netmap_idx_k2n(kring, nm_i);
		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			struct vmxnet3_rxdesc *rxd_fill;
			struct vmxnet3_rxbuf *rxbuf;

			if (slot->flags & NS_BUF_CHANGED) {
				uint64_t paddr;
				void *addr = PNMB(na, slot, &paddr);
				struct vmxnet3_rxdesc *rxd = &rxr->vxrxr_rxd[nic_i];

				if (addr == NETMAP_BUF_BASE(na))
					return netmap_ring_reinit(kring);

				rxd->addr = paddr;
				rxbuf = &rxr->vxrxr_rxbuf[nic_i];
				netmap_reload_map(na, rxr->vxrxr_rxtag,
						rxbuf->vrxb_dmamap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
				vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
			}

			rxd_fill = &rxr->vxrxr_rxd[rxr->vxrxr_fill];
			rxbuf = &rxr->vxrxr_rxbuf[rxr->vxrxr_fill];

			bus_dmamap_sync(rxr->vxrxr_rxtag, rxbuf->vrxb_dmamap,
					BUS_DMASYNC_PREREAD);

			rxd_fill->gen = rxr->vxrxr_gen;
			vmxnet3_rxr_increment_fill(rxr);

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;
		if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
			vmxnet3_write_bar0(sc,
				VMXNET3_BAR0_RXH1(kring->ring_id), rxr->vxrxr_fill);
		}
	}
	return 0;
}

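/*
 * Build the netmap_adapter description for this device and attach it,
 * called from the driver attach path. Native netmap support is off by
 * default; it is enabled by setting the kernel environment variable
 * (loader tunable) vmxnet3.netmap_native to a non-zero value, e.g. in
 * /boot/loader.conf:
 *
 *	vmxnet3.netmap_native="1"
 */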
static void
vmxnet3_netmap_attach(struct vmxnet3_softc *sc)
{
	struct netmap_adapter na;
	int enable = 0;

	if (getenv_int("vmxnet3.netmap_native", &enable) < 0 || !enable) {
		return;
	}

	bzero(&na, sizeof(na));

	na.ifp = sc->vmx_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	na.num_tx_desc = sc->vmx_ntxdescs;
	na.num_rx_desc = sc->vmx_nrxdescs;
	na.num_tx_rings = sc->vmx_ntxqueues;
	na.num_rx_rings = sc->vmx_nrxqueues;
	na.nm_register = vmxnet3_netmap_reg;
	na.nm_txsync = vmxnet3_netmap_txsync;
	na.nm_rxsync = vmxnet3_netmap_rxsync;
	netmap_attach(&na);
}