/*
 * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


/*
 * $FreeBSD: head/sys/dev/netmap/if_lem_netmap.h 232238 2012-02-27 19:05:01Z luigi $
 * $Id: if_lem_netmap.h 10627 2012-02-23 19:37:15Z luigi $
 *
 * netmap support for "lem"
 *
 * For details on netmap support please see ixgbe_netmap.h
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>    /* vtophys ? */
#include <dev/netmap/netmap_kern.h>

42
43static void
44lem_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int ringid)
45{
46	struct adapter *adapter = ifp->if_softc;
47
48	/* only one ring here so ignore the ringid */
49	switch (what) {
50	case NETMAP_CORE_LOCK:
51		EM_CORE_LOCK(adapter);
52		break;
53	case NETMAP_CORE_UNLOCK:
54		EM_CORE_UNLOCK(adapter);
55		break;
56	case NETMAP_TX_LOCK:
57		EM_TX_LOCK(adapter);
58		break;
59	case NETMAP_TX_UNLOCK:
60		EM_TX_UNLOCK(adapter);
61		break;
62	case NETMAP_RX_LOCK:
63		EM_RX_LOCK(adapter);
64		break;
65	case NETMAP_RX_UNLOCK:
66		EM_RX_UNLOCK(adapter);
67		break;
68	}
69}
70
71
72/*
73 * Register/unregister
74 */
/*
 * Register/unregister routine, called by the netmap core on
 * NIOCREGIF/close to switch the NIC between netmap and regular mode.
 *
 * onoff != 0 enters netmap mode, onoff == 0 restores normal operation.
 * Returns 0 on success, EINVAL if the netmap adapter is missing, or
 * ENOMEM if the reinit of the interface failed (in which case we fall
 * through to the 'off' path and return to non-netmap mode).
 *
 * Interrupts are disabled and the taskqueue drained across the switch;
 * lem_init_locked() re-enables interrupts in both directions.
 */
static int
lem_netmap_reg(struct ifnet *ifp, int onoff)
{
	struct adapter *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	int error = 0;

	if (na == NULL)
		return EINVAL;

	lem_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

#ifndef EM_LEGACY_IRQ // XXX do we need this ?
	/* quiesce deferred interrupt handlers before flipping modes */
	taskqueue_block(adapter->tq);
	taskqueue_drain(adapter->tq, &adapter->rxtx_task);
	taskqueue_drain(adapter->tq, &adapter->link_task);
#endif /* !EM_LEGACY_IRQ */
	if (onoff) {
		ifp->if_capenable |= IFCAP_NETMAP;

		/* save the stack's transmit routine and divert to netmap */
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_start;

		lem_init_locked(adapter);
		if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
			error = ENOMEM;
			goto fail;	/* undo via the 'off' path below */
		}
	} else {
fail:
		/* return to non-netmap mode */
		ifp->if_transmit = na->if_transmit;
		ifp->if_capenable &= ~IFCAP_NETMAP;
		lem_init_locked(adapter);	/* also enable intr */
	}

#ifndef EM_LEGACY_IRQ
	taskqueue_unblock(adapter->tq); // XXX do we need this ?
#endif /* !EM_LEGACY_IRQ */

	return (error);
}
120
121
122/*
123 * Reconcile kernel and user view of the transmit ring.
124 */
/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * Two phases:
 *  1) push slots between nr_hwcur and ring->cur out to the NIC,
 *     building one e1000 descriptor per netmap slot and finally
 *     kicking the hardware by writing the tail register (TDT);
 *  2) if we ran out of room (or sent nothing), read the head
 *     register (TDH) to reclaim completed descriptors and grow
 *     nr_hwavail.
 *
 * Returns 0, or the result of netmap_ring_reinit() if userspace
 * handed us an inconsistent ring.
 */
static int
lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct adapter *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;

	/* generate an interrupt approximately every half ring */
	int report_frequency = kring->nkr_num_slots >> 1;

	/* take a copy of ring->cur now, and never read it again */
	k = ring->cur;
	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		EM_TX_LOCK(adapter);
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
			BUS_DMASYNC_POSTREAD);
	/*
	 * Process new packets to send. j is the current index in the
	 * netmap ring, l is the corresponding index in the NIC ring.
	 */
	j = kring->nr_hwcur;
	if (j != k) {	/* we have new packets to send */
		l = netmap_idx_k2n(kring, j);
		for (n = 0; j != k; n++) {
			/* slot is the current slot in the netmap ring */
			struct netmap_slot *slot = &ring->slot[j];
			/* curr is the current slot in the nic ring */
			struct e1000_tx_desc *curr = &adapter->tx_desc_base[l];
			struct em_buffer *txbuf = &adapter->tx_buffer_area[l];
			/* request a tx-done report (RS) on marked slots and
			 * roughly twice per ring sweep */
			int flags = ((slot->flags & NS_REPORT) ||
				j == 0 || j == report_frequency) ?
					E1000_TXD_CMD_RS : 0;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);
			u_int len = slot->len;

			/* a bad buffer index or oversize length means the
			 * ring is corrupt: bail out and reinit */
			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
				if (do_lock)
					EM_TX_UNLOCK(adapter);
				return netmap_ring_reinit(kring);
			}

			slot->flags &= ~NS_REPORT;
			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(adapter->txtag, txbuf->map, addr);
				curr->buffer_addr = htole64(paddr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			/* fill the descriptor: one buffer per packet (EOP) */
			curr->upper.data = 0;
			curr->lower.data =
			    htole32( adapter->txd_cmd | len |
				(E1000_TXD_CMD_EOP | flags) );

			bus_dmamap_sync(adapter->txtag, txbuf->map,
			    BUS_DMASYNC_PREWRITE);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		kring->nr_hwcur = k; /* the saved ring->cur */
		kring->nr_hwavail -= n;

		/* make descriptors visible to the NIC before the doorbell */
		bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), l);
	}

	if (n == 0 || kring->nr_hwavail < 1) {
		int delta;

		/* record completed transmissions using TDH */
		l = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
		if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */
			D("bad TDH %d", l);
			l -= kring->nkr_num_slots;
		}
		delta = l - adapter->next_tx_to_clean;
		if (delta) {
			/* some tx completed, increment hwavail. */
			if (delta < 0)
				delta += kring->nkr_num_slots;
			adapter->next_tx_to_clean = l;
			kring->nr_hwavail += delta;
		}
	}
	/* update avail to what the kernel knows */
	ring->avail = kring->nr_hwavail;

	if (do_lock)
		EM_TX_UNLOCK(adapter);
	return 0;
}
223
224
225/*
226 * Reconcile kernel and user view of the receive ring.
227 */
/*
 * Reconcile kernel and user view of the receive ring.
 *
 * Two phases:
 *  1) import newly received packets from the NIC (descriptors with
 *     the DD status bit set) into the netmap ring, growing nr_hwavail;
 *  2) recycle the buffers that userspace has released (slots between
 *     nr_hwcur and ring->cur, minus ring->reserved), clear their
 *     status and hand them back to the NIC via the tail register
 *     (RDT), keeping one slot free as the hardware requires.
 *
 * Returns 0, or the result of netmap_ring_reinit() on an
 * inconsistent ring.
 */
static int
lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct adapter *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, l, n, lim = kring->nkr_num_slots - 1;
	/* scan the NIC unless interrupt mitigation deferred the work */
	int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
	u_int k = ring->cur, resvd = ring->reserved;

	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		EM_RX_LOCK(adapter);

	/* XXX check sync modes */
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Import newly received packets into the netmap ring.
	 * j is an index in the netmap ring, l in the NIC ring.
	 */
	l = adapter->next_rx_desc_to_check;
	j = netmap_idx_n2k(kring, l);
	if (netmap_no_pendintr || force_update) {
		for (n = 0; ; n++) {
			struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
			uint32_t staterr = le32toh(curr->status);
			int len;

			/* DD clear: no more completed descriptors */
			if ((staterr & E1000_RXD_STAT_DD) == 0)
				break;
			len = le16toh(curr->length) - 4; // CRC
			if (len < 0) {
				D("bogus pkt size at %d", j);
				len = 0;
			}
			ring->slot[j].len = len;
			bus_dmamap_sync(adapter->rxtag,
				adapter->rx_buffer_area[l].map,
				    BUS_DMASYNC_POSTREAD);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		if (n) { /* update the state variables */
			adapter->next_rx_desc_to_check = l;
			kring->nr_hwavail += n;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/* skip past packets that userspace has released */
	j = kring->nr_hwcur;	/* netmap ring index */
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		/* move k back so reserved slots are not recycled */
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
	if (j != k) { /* userspace has released some packets. */
		l = netmap_idx_k2n(kring, j); /* NIC ring index */
		for (n = 0; j != k; n++) {
			struct netmap_slot *slot = &ring->slot[j];
			struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
			struct em_buffer *rxbuf = &adapter->rx_buffer_area[l];
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			if (addr == netmap_buffer_base) { /* bad buf */
				if (do_lock)
					EM_RX_UNLOCK(adapter);
				return netmap_ring_reinit(kring);
			}

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(adapter->rxtag, rxbuf->map, addr);
				curr->buffer_addr = htole64(paddr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			/* clear DD so the NIC can reuse the descriptor */
			curr->status = 0;

			bus_dmamap_sync(adapter->rxtag, rxbuf->map,
			    BUS_DMASYNC_PREREAD);

			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * IMPORTANT: we must leave one free slot in the ring,
		 * so move l back by one unit
		 */
		l = (l == 0) ? lim : l - 1;
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), l);
	}
	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail - resvd;
	if (do_lock)
		EM_RX_UNLOCK(adapter);
	return 0;
}
337
338
339static void
340lem_netmap_attach(struct adapter *adapter)
341{
342	struct netmap_adapter na;
343
344	bzero(&na, sizeof(na));
345
346	na.ifp = adapter->ifp;
347	na.separate_locks = 1;
348	na.num_tx_desc = adapter->num_tx_desc;
349	na.num_rx_desc = adapter->num_rx_desc;
350	na.nm_txsync = lem_netmap_txsync;
351	na.nm_rxsync = lem_netmap_rxsync;
352	na.nm_lock = lem_netmap_lock_wrapper;
353	na.nm_register = lem_netmap_reg;
354	netmap_attach(&na, 1);
355}
356
357/* end of file */
358