/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/10/sys/dev/ixgbe/ixv_netmap.c 323830 2017-09-20 21:22:20Z marius $*/

/*
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: stable/10/sys/dev/ixgbe/ixv_netmap.c 323830 2017-09-20 21:22:20Z marius $
 *
 * netmap support for: ixv
 *
 * This file is meant to be a reference on how to implement
 * netmap support for a network driver.
 * It contains only static or inline functions that are used
 * by a single driver.  To avoid replicating code we simply
 * #include it near the beginning of the standard driver.
 */

#ifdef DEV_NETMAP
/*
 * Some drivers may need the following headers. Others
 * already include them by default.

#include <vm/vm.h>
#include <vm/pmap.h>

 */
#include "ixv.h"

/*
 * device-specific sysctl variables:
 *
 * ixv_rx_miss, ixv_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
static int ixv_rx_miss, ixv_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, ixv_rx_miss,
    CTLFLAG_RW, &ixv_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, ixv_rx_miss_bufs,
    CTLFLAG_RW, &ixv_rx_miss_bufs, 0, "potentially missed rx intr bufs");
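
/*
 * Usage note (illustrative, not part of the original source): since these
 * OIDs hang off the dev.netmap sysctl node, the counters can be inspected
 * from userland with e.g.
 *
 *	sysctl dev.netmap.ixv_rx_miss dev.netmap.ixv_rx_miss_bufs
 */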


/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
ixv_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	adapter->stop_locked(adapter);

	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	adapter->init_locked(adapter);	/* also enables intr */
	IXGBE_CORE_UNLOCK(adapter);
	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}


/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * while the kernel knows that kring->nr_hwcur is the first unsent packet.
 *
 * Here we push out as many packets as possible, and possibly
 * reclaim buffers from previously completed transmissions.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
ixv_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	/*
	 * interrupts on every tx packet are expensive, so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
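	/*
	 * Example (illustrative): with a 1024-slot ring, report_frequency
	 * is 512, so below we only request a completion report (RS bit) on
	 * slot 0 and slot 512, plus any slot where userspace set NS_REPORT.
	 */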

	/* device-specific */
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = &adapter->tx_rings[kring->ring_id];
	int reclaim_tx;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap ring,
	 * nic_i is the corresponding index in the NIC ring.
	 * The two numbers differ because upon a *_init() we reset
	 * the NIC ring but leave the netmap ring unchanged.
	 * For the transmit ring, we have
	 *
	 *		nm_i = kring->nr_hwcur
	 *		nic_i = IXGBE_TDT (not tracked in the driver)
	 * and
	 * 		nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * In this driver kring->nkr_hwofs >= 0, but for other
	 * drivers it might be negative as well.
	 */
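	/*
	 * Worked example (illustrative): with ring_size == 1024 and
	 * kring->nkr_hwofs == 3, NIC slot nic_i == 1021 corresponds to
	 * netmap slot nm_i == (1021 + 3) % 1024 == 0.
	 */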

	/*
	 * If we have packets to send (kring->nr_hwcur != kring->rhead)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even when NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so only do it
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txr->tx_buffers[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			/* device-specific */
			union ixgbe_adv_tx_desc *curr = &txr->tx_base[nic_i];
			struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[nic_i];
			int flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				IXGBE_TXD_CMD_RS : 0;

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txr->tx_buffers[nic_i + 1]);

			NM_CHECK_ADDR_LEN(na, addr, len);

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, txr->txtag, txbuf->map, addr);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

			/* Fill the slot in the NIC ring. */
			/* Use legacy descriptors; they are faster? */
			curr->read.buffer_addr = htole64(paddr);
			curr->read.olinfo_status = 0;
			curr->read.cmd_type_len = htole32(len | flags |
				IXGBE_ADVTXD_DCMD_IFCS | IXGBE_TXD_CMD_EOP);

			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txr->txtag, txbuf->map,
				BUS_DMASYNC_PREWRITE);

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		IXGBE_WRITE_REG(&adapter->hw, txr->tail, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 * Because this is expensive (we read a NIC register etc.)
	 * we only do it in specific cases (see below).
	 */
	if (flags & NAF_FORCE_RECLAIM) {
		reclaim_tx = 1; /* forced reclaim */
	} else if (!nm_kr_txempty(kring)) {
		reclaim_tx = 0; /* have buffers, no reclaim */
	} else {
		/*
		 * No buffers available. Locate previous slot with
		 * REPORT_STATUS set.
		 * If the slot has DD set, we can reclaim space,
		 * otherwise wait for the next interrupt.
		 * This enables interrupt moderation on the tx
		 * side though it might reduce throughput.
		 */
		struct ixgbe_legacy_tx_desc *txd =
		    (struct ixgbe_legacy_tx_desc *)txr->tx_base;

		nic_i = txr->next_to_clean + report_frequency;
		if (nic_i > lim)
			nic_i -= lim + 1;
		// round to the closest with dd set
		nic_i = (nic_i < kring->nkr_num_slots / 4 ||
			 nic_i >= kring->nkr_num_slots*3/4) ?
			0 : report_frequency;
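		/*
		 * Explanatory note (not in the original source): only
		 * slots 0 and report_frequency had the RS bit requested
		 * above, so the computation snaps nic_i to whichever of
		 * those two slots is closest to next_to_clean plus half
		 * a ring; its DD bit then tells us whether that batch
		 * of descriptors has completed.
		 */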
		reclaim_tx = txd[nic_i].upper.fields.status & IXGBE_TXD_STAT_DD;	// XXX cpu_to_le32 ?
	}
	if (reclaim_tx) {
		/*
		 * Record completed transmissions.
		 * We (re)use the driver's txr->next_to_clean to keep
		 * track of the most recently completed transmission.
		 *
		 * The datasheet discourages the use of TDH to find
		 * out the number of sent packets, but we only set
		 * REPORT_STATUS in a few slots so TDH is the only
		 * good way.
		 */
		nic_i = IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(kring->ring_id));
		if (nic_i >= kring->nkr_num_slots) { /* XXX can it happen ? */
			D("TDH wrap %d", nic_i);
			nic_i -= kring->nkr_num_slots;
		}
		if (nic_i != txr->next_to_clean) {
			/* some tx completed, increment avail */
			txr->next_to_clean = nic_i;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}

	nm_txsync_finalize(kring);

	return 0;
}


/*
 * Reconcile kernel and user view of the receive ring.
 * As with txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
ixv_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = nm_rxsync_prologue(kring);
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* device-specific */
	struct adapter *adapter = ifp->if_softc;
	struct rx_ring *rxr = &adapter->rx_rings[kring->ring_id];

	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = rxr->next_to_check;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_to_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		int crclen = 0;
		uint16_t slot_flags = kring->nkr_slot_flags;

		nic_i = rxr->next_to_check; // or also k2n(kring->nr_hwtail)
		nm_i = netmap_idx_n2k(kring, nic_i);

		for (n = 0; ; n++) {
			union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i];
			uint32_t staterr = le32toh(curr->wb.upper.status_error);

			if ((staterr & IXGBE_RXD_STAT_DD) == 0)
				break;
			ring->slot[nm_i].len = le16toh(curr->wb.upper.length) - crclen;
			ring->slot[nm_i].flags = slot_flags;
			bus_dmamap_sync(rxr->ptag,
			    rxr->rx_buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		if (n) { /* update the state variables */
			if (netmap_no_pendintr && !force_update) {
				/* diagnostics */
				ixv_rx_miss++;
				ixv_rx_miss_bufs += n;
			}
			rxr->next_to_check = nic_i;
			kring->nr_hwtail = nm_i;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/*
	 * Second part: skip past packets that userspace has released
	 * (kring->nr_hwcur to kring->rhead excluded), and make the
	 * buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i];
			struct ixgbe_rx_buf *rxbuf = &rxr->rx_buffers[nic_i];

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				goto ring_reset;

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, rxr->ptag, rxbuf->pmap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			curr->wb.upper.status_error = 0;
			curr->read.pkt_addr = htole64(paddr);
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_PREREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * IMPORTANT: we must leave one free slot in the ring,
		 * so move nic_i back by one unit
		 */
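		/*
		 * (Rationale, illustrative and not from the original
		 * source: if the tail register were allowed to catch up
		 * with the NIC head, the hardware could not tell a full
		 * ring from an empty one.)
		 */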
		nic_i = nm_prev(nic_i, lim);
		IXGBE_WRITE_REG(&adapter->hw, rxr->tail, nic_i);
	}

	/* tell userspace that there might be new packets */
	nm_rxsync_finalize(kring);

	return 0;

ring_reset:
	return netmap_ring_reinit(kring);
}


/*
 * The attach routine, called near the end of ixv_attach(),
 * fills the parameters for netmap_attach() and calls it.
 * It cannot fail; in the worst case (such as no memory)
 * netmap mode will be disabled and the driver will only
 * operate in standard mode.
 */
void
ixv_netmap_attach(struct adapter *adapter)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = adapter->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	na.num_tx_desc = adapter->num_tx_desc;
	na.num_rx_desc = adapter->num_rx_desc;
	na.nm_txsync = ixv_netmap_txsync;
	na.nm_rxsync = ixv_netmap_rxsync;
	na.nm_register = ixv_netmap_reg;
	na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
	netmap_attach(&na);
}
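
/*
 * Typical call site (illustrative, assuming the usual driver layout):
 * the driver invokes this near the end of its attach routine, e.g.
 *
 *	#ifdef DEV_NETMAP
 *		ixv_netmap_attach(adapter);
 *	#endif
 */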

#endif /* DEV_NETMAP */

/* end of file */