/*
 * Copyright (C) 2011-2014 Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: head/sys/dev/netmap/if_re_netmap.h 260368 2014-01-06 12:53:15Z luigi $
 *
 * netmap support for: re
 *
 * For more details on netmap support please see ixgbe_netmap.h
 */


#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>    /* vtophys ? */
#include <dev/netmap/netmap_kern.h>

42652Sjkh/*
43652Sjkh * Register/unregister. We are already under netmap lock.
44652Sjkh */
45652Sjkhstatic int
46652Sjkhre_netmap_reg(struct netmap_adapter *na, int onoff)
47652Sjkh{
48652Sjkh	struct ifnet *ifp = na->ifp;
493256Sswallace	struct rl_softc *adapter = ifp->if_softc;
503256Sswallace
513256Sswallace	RL_LOCK(adapter);
523256Sswallace	re_stop(adapter); /* also clears IFF_DRV_RUNNING */
533256Sswallace	if (onoff) {
543256Sswallace		nm_set_native_flags(na);
553256Sswallace	} else {
563256Sswallace		nm_clear_native_flags(na);
573256Sswallace	}
583256Sswallace	re_init_locked(adapter);	/* also enables intr */
593256Sswallace	RL_UNLOCK(adapter);
609750Sjkh	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
619750Sjkh}
629750Sjkh

/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * Slots from nr_hwcur up to (but excluding) rhead belong to userspace
 * and are pushed to the NIC; completed descriptors (OWN bit cleared by
 * the hardware) are then reclaimed so nr_hwtail can advance.
 */
static int
re_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct rl_softc *sc = ifp->if_softc;
	struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;

	/* pick up status the NIC wrote back before we read descriptors */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); // XXX extra postwrite ?

	/*
	 * First part: process new packets to send.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		nic_i = sc->rl_ldata.rl_tx_prodidx;
		// XXX or netmap_idx_k2n(kring, nm_i);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			/* device-specific */
			struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[nic_i];
			/* one buffer per packet: SOF and EOF on the same descriptor */
			int cmd = slot->len | RL_TDESC_CMD_EOF |
				RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF ;

			NM_CHECK_ADDR_LEN(addr, len);

			if (nic_i == lim)	/* mark end of ring */
				cmd |= RL_TDESC_CMD_EOR;

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				netmap_reload_map(sc->rl_ldata.rl_tx_mtag,
					txd[nic_i].tx_dmamap, addr);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

			/* Fill the slot in the NIC ring (OWN hands it to the NIC). */
			desc->rl_cmdstat = htole32(cmd);

			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
				txd[nic_i].tx_dmamap,
				BUS_DMASYNC_PREWRITE);

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		sc->rl_ldata.rl_tx_prodidx = nic_i;
		kring->nr_hwcur = head;

		/* synchronize the NIC ring before kicking the transmitter */
		bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
			sc->rl_ldata.rl_tx_list_map,
			BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* start ? */
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 * Walk from considx towards prodidx, stopping at the first
	 * descriptor the NIC still owns.
	 */
	if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		nic_i = sc->rl_ldata.rl_tx_considx;
		for (n = 0; nic_i != sc->rl_ldata.rl_tx_prodidx;
		    n++, nic_i = RL_TX_DESC_NXT(sc, nic_i)) {
			uint32_t cmdstat =
				le32toh(sc->rl_ldata.rl_tx_list[nic_i].rl_cmdstat);
			if (cmdstat & RL_TDESC_STAT_OWN)
				break;
		}
		if (n > 0) {
			sc->rl_ldata.rl_tx_considx = nic_i;
			sc->rl_ldata.rl_tx_free += n;
			/* hwtail is one slot before the next descriptor to reclaim */
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}

	nm_txsync_finalize(kring);

	return 0;
}
166652Sjkh

/*
 * Reconcile kernel and user view of the receive ring.
 *
 * First import packets the NIC has completed (OWN bit cleared),
 * then return to the NIC the buffers userspace has released
 * (slots between nr_hwcur and rhead).
 */
static int
re_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = nm_rxsync_prologue(kring);
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* device-specific */
	struct rl_softc *sc = ifp->if_softc;
	struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;

	if (head > lim)
		return netmap_ring_reinit(kring);

	/* pick up descriptor updates written back by the NIC */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
			sc->rl_ldata.rl_rx_list_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 *
	 * This device uses all the buffers in the ring, so we need
	 * another termination condition in addition to RL_RDESC_STAT_OWN
	 * cleared (all buffers could have it cleared). The easiest one
	 * is to stop right before nm_hwcur.
	 */
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;
		uint32_t stop_i = nm_prev(kring->nr_hwcur, lim);

		nic_i = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
		nm_i = netmap_idx_n2k(kring, nic_i);

		while (nm_i != stop_i) {
			struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[nic_i];
			uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
			uint32_t total_len;

			/* NIC still owns this descriptor: nothing more to import */
			if ((rxstat & RL_RDESC_STAT_OWN) != 0)
				break;
			total_len = rxstat & sc->rl_rxlenmask;
			/* XXX subtract crc */
			total_len = (total_len < 4) ? 0 : total_len - 4;
			ring->slot[nm_i].len = total_len;
			ring->slot[nm_i].flags = slot_flags;
			/*  sync was in re_newbuf() */
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
			    rxd[nic_i].rx_dmamap, BUS_DMASYNC_POSTREAD);
			// sc->rl_ifp->if_ipackets++;
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		sc->rl_ldata.rl_rx_prodidx = nic_i;
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 * Give each released buffer back to the NIC by setting OWN.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[nic_i];
			int cmd = NETMAP_BUF_SIZE | RL_RDESC_CMD_OWN;

			if (addr == netmap_buffer_base) /* bad buf */
				goto ring_reset;

			if (nic_i == lim)	/* mark end of ring */
				cmd |= RL_RDESC_CMD_EOR;

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
					rxd[nic_i].rx_dmamap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			desc->rl_cmdstat = htole32(cmd);
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
			    rxd[nic_i].rx_dmamap,
			    BUS_DMASYNC_PREREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		/* push descriptor updates out to the NIC */
		bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
		    sc->rl_ldata.rl_rx_list_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* tell userspace that there might be new packets */
	nm_rxsync_finalize(kring);

	return 0;

ring_reset:
	return netmap_ring_reinit(kring);
}
284652Sjkh
285652Sjkh
286652Sjkh/*
2879750Sjkh * Additional routines to init the tx and rx rings.
288652Sjkh * In other drivers we do that inline in the main code.
289652Sjkh */
290652Sjkhstatic void
291652Sjkhre_netmap_tx_init(struct rl_softc *sc)
292652Sjkh{
293652Sjkh	struct rl_txdesc *txd;
294652Sjkh	struct rl_desc *desc;
295652Sjkh	int i, n;
296652Sjkh	struct netmap_adapter *na = NA(sc->rl_ifp);
297652Sjkh	struct netmap_slot *slot;
298652Sjkh
299652Sjkh	if (!na || !(na->na_flags & NAF_NATIVE_ON)) {
300652Sjkh		return;
301652Sjkh	}
302652Sjkh
303652Sjkh	slot = netmap_reset(na, NR_TX, 0, 0);
304652Sjkh	/* slot is NULL if we are not in netmap mode */
305652Sjkh	if (!slot)
306652Sjkh		return;  // XXX cannot happen
307652Sjkh	/* in netmap mode, overwrite addresses and maps */
308652Sjkh	txd = sc->rl_ldata.rl_tx_desc;
309652Sjkh	desc = sc->rl_ldata.rl_tx_list;
310652Sjkh	n = sc->rl_ldata.rl_tx_desc_cnt;
311652Sjkh
312652Sjkh	/* l points in the netmap ring, i points in the NIC ring */
313652Sjkh	for (i = 0; i < n; i++) {
314652Sjkh		uint64_t paddr;
315652Sjkh		int l = netmap_idx_n2k(&na->tx_rings[0], i);
316652Sjkh		void *addr = PNMB(slot + l, &paddr);
317652Sjkh
3189750Sjkh		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
319652Sjkh		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
320652Sjkh		netmap_load_map(sc->rl_ldata.rl_tx_mtag,
321652Sjkh			txd[i].tx_dmamap, addr);
322652Sjkh	}
323652Sjkh}
324652Sjkh
3253256Sswallacestatic void
326652Sjkhre_netmap_rx_init(struct rl_softc *sc)
327652Sjkh{
328652Sjkh	struct netmap_adapter *na = NA(sc->rl_ifp);
329652Sjkh	struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
330652Sjkh	struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
331652Sjkh	uint32_t cmdstat;
332652Sjkh	uint32_t nic_i, max_avail;
333652Sjkh	uint32_t const n = sc->rl_ldata.rl_rx_desc_cnt;
334652Sjkh
335652Sjkh	if (!slot)
336652Sjkh		return;
337652Sjkh	/*
3383256Sswallace	 * Do not release the slots owned by userspace,
339652Sjkh	 * and also keep one empty.
340652Sjkh	 */
3413256Sswallace	max_avail = n - 1 - nm_kr_rxspace(&na->rx_rings[0]);
342652Sjkh	for (nic_i = 0; nic_i < n; nic_i++) {
343652Sjkh		void *addr;
344652Sjkh		uint64_t paddr;
3453256Sswallace		uint32_t nm_i = netmap_idx_n2k(&na->rx_rings[0], nic_i);
346652Sjkh
347652Sjkh		addr = PNMB(slot + nm_i, &paddr);
3483256Sswallace
3493256Sswallace		netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
3503256Sswallace		    sc->rl_ldata.rl_rx_desc[nic_i].rx_dmamap, addr);
3513256Sswallace		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
3523256Sswallace		    sc->rl_ldata.rl_rx_desc[nic_i].rx_dmamap, BUS_DMASYNC_PREREAD);
3533256Sswallace		desc[nic_i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
3543256Sswallace		desc[nic_i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
3553256Sswallace		cmdstat = NETMAP_BUF_SIZE;
3563256Sswallace		if (nic_i == n - 1) /* mark the end of ring */
3579750Sjkh			cmdstat |= RL_RDESC_CMD_EOR;
3589750Sjkh		if (nic_i < max_avail)
3593256Sswallace			cmdstat |= RL_RDESC_CMD_OWN;
3603256Sswallace		desc[nic_i].rl_cmdstat = htole32(cmdstat);
3613256Sswallace	}
3623256Sswallace}
3633256Sswallace
3643256Sswallace
3653256Sswallacestatic void
3663256Sswallacere_netmap_attach(struct rl_softc *sc)
3673256Sswallace{
3683256Sswallace	struct netmap_adapter na;
3693256Sswallace
3703256Sswallace	bzero(&na, sizeof(na));
3713256Sswallace
3723256Sswallace	na.ifp = sc->rl_ifp;
3733256Sswallace	na.na_flags = NAF_BDG_MAYSLEEP;
3743256Sswallace	na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt;
3753256Sswallace	na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt;
3763256Sswallace	na.nm_txsync = re_netmap_txsync;
3773256Sswallace	na.nm_rxsync = re_netmap_rxsync;
3783256Sswallace	na.nm_register = re_netmap_reg;
3793256Sswallace	na.num_tx_rings = na.num_rx_rings = 1;
3803256Sswallace	netmap_attach(&na);
3813256Sswallace}
3823256Sswallace
3833256Sswallace/* end of file */
3843256Sswallace