/*
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: stable/10/sys/dev/ixgbe/ixgbe_netmap.c 323830 2017-09-20 21:22:20Z marius $
 *
 * netmap support for: ixgbe
 *
 * This file is meant to be a reference on how to implement
 * netmap support for a network driver.
33315333Serj * This file contains code but only static or inline functions used 34315333Serj * by a single driver. To avoid replication of code we just #include 35315333Serj * it near the beginning of the standard driver. 36315333Serj */ 37315333Serj 38315333Serj#ifdef DEV_NETMAP 39315333Serj/* 40315333Serj * Some drivers may need the following headers. Others 41315333Serj * already include them by default 42315333Serj 43315333Serj#include <vm/vm.h> 44315333Serj#include <vm/pmap.h> 45315333Serj 46315333Serj */ 47315333Serj#include "ixgbe.h" 48315333Serj 49315333Serj/* 50315333Serj * device-specific sysctl variables: 51315333Serj * 52315333Serj * ix_crcstrip: 0: keep CRC in rx frames (default), 1: strip it. 53315333Serj * During regular operations the CRC is stripped, but on some 54315333Serj * hardware reception of frames not multiple of 64 is slower, 55315333Serj * so using crcstrip=0 helps in benchmarks. 56315333Serj * 57315333Serj * ix_rx_miss, ix_rx_miss_bufs: 58315333Serj * count packets that might be missed due to lost interrupts. 59315333Serj */ 60315333SerjSYSCTL_DECL(_dev_netmap); 61315333Serjstatic int ix_rx_miss, ix_rx_miss_bufs; 62315333Serjint ix_crcstrip; 63315333SerjSYSCTL_INT(_dev_netmap, OID_AUTO, ix_crcstrip, 64315333Serj CTLFLAG_RW, &ix_crcstrip, 0, "strip CRC on rx frames"); 65315333SerjSYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss, 66315333Serj CTLFLAG_RW, &ix_rx_miss, 0, "potentially missed rx intr"); 67315333SerjSYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss_bufs, 68315333Serj CTLFLAG_RW, &ix_rx_miss_bufs, 0, "potentially missed rx intr bufs"); 69315333Serj 70315333Serj 71315333Serjstatic void 72315333Serjset_crcstrip(struct ixgbe_hw *hw, int onoff) 73315333Serj{ 74315333Serj /* crc stripping is set in two places: 75315333Serj * IXGBE_HLREG0 (modified on init_locked and hw reset) 76315333Serj * IXGBE_RDRXCTL (set by the original driver in 77315333Serj * ixgbe_setup_hw_rsc() called in init_locked. 
78315333Serj * We disable the setting when netmap is compiled in). 79315333Serj * We update the values here, but also in ixgbe.c because 80315333Serj * init_locked sometimes is called outside our control. 81315333Serj */ 82315333Serj uint32_t hl, rxc; 83315333Serj 84315333Serj hl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 85315333Serj rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 86315333Serj if (netmap_verbose) 87315333Serj D("%s read HLREG 0x%x rxc 0x%x", 88315333Serj onoff ? "enter" : "exit", hl, rxc); 89315333Serj /* hw requirements ... */ 90315333Serj rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 91315333Serj rxc |= IXGBE_RDRXCTL_RSCACKC; 92315333Serj if (onoff && !ix_crcstrip) { 93315333Serj /* keep the crc. Fast rx */ 94315333Serj hl &= ~IXGBE_HLREG0_RXCRCSTRP; 95315333Serj rxc &= ~IXGBE_RDRXCTL_CRCSTRIP; 96315333Serj } else { 97315333Serj /* reset default mode */ 98315333Serj hl |= IXGBE_HLREG0_RXCRCSTRP; 99315333Serj rxc |= IXGBE_RDRXCTL_CRCSTRIP; 100315333Serj } 101315333Serj if (netmap_verbose) 102315333Serj D("%s write HLREG 0x%x rxc 0x%x", 103315333Serj onoff ? "enter" : "exit", hl, rxc); 104315333Serj IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl); 105315333Serj IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc); 106315333Serj} 107315333Serj 108315333Serj 109315333Serj/* 110315333Serj * Register/unregister. We are already under netmap lock. 111315333Serj * Only called on the first register or the last unregister. 
112315333Serj */ 113315333Serjstatic int 114315333Serjixgbe_netmap_reg(struct netmap_adapter *na, int onoff) 115315333Serj{ 116315333Serj struct ifnet *ifp = na->ifp; 117315333Serj struct adapter *adapter = ifp->if_softc; 118315333Serj 119315333Serj IXGBE_CORE_LOCK(adapter); 120315333Serj adapter->stop_locked(adapter); 121315333Serj 122315333Serj set_crcstrip(&adapter->hw, onoff); 123315333Serj /* enable or disable flags and callbacks in na and ifp */ 124315333Serj if (onoff) { 125315333Serj nm_set_native_flags(na); 126315333Serj } else { 127315333Serj nm_clear_native_flags(na); 128315333Serj } 129315333Serj adapter->init_locked(adapter); /* also enables intr */ 130315333Serj set_crcstrip(&adapter->hw, onoff); // XXX why twice ? 131315333Serj IXGBE_CORE_UNLOCK(adapter); 132315333Serj return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1); 133315333Serj} 134315333Serj 135315333Serj 136315333Serj/* 137315333Serj * Reconcile kernel and user view of the transmit ring. 138315333Serj * 139315333Serj * All information is in the kring. 140315333Serj * Userspace wants to send packets up to the one before kring->rhead, 141315333Serj * kernel knows kring->nr_hwcur is the first unsent packet. 142315333Serj * 143315333Serj * Here we push packets out (as many as possible), and possibly 144315333Serj * reclaim buffers from previously completed transmission. 145315333Serj * 146315333Serj * The caller (netmap) guarantees that there is only one instance 147315333Serj * running at any time. Any interference with other driver 148315333Serj * methods should be handled by the individual drivers. 
149315333Serj */ 150315333Serjstatic int 151315333Serjixgbe_netmap_txsync(struct netmap_kring *kring, int flags) 152315333Serj{ 153315333Serj struct netmap_adapter *na = kring->na; 154315333Serj struct ifnet *ifp = na->ifp; 155315333Serj struct netmap_ring *ring = kring->ring; 156315333Serj u_int nm_i; /* index into the netmap ring */ 157315333Serj u_int nic_i; /* index into the NIC ring */ 158315333Serj u_int n; 159315333Serj u_int const lim = kring->nkr_num_slots - 1; 160315333Serj u_int const head = kring->rhead; 161315333Serj /* 162315333Serj * interrupts on every tx packet are expensive so request 163315333Serj * them every half ring, or where NS_REPORT is set 164315333Serj */ 165315333Serj u_int report_frequency = kring->nkr_num_slots >> 1; 166315333Serj 167315333Serj /* device-specific */ 168315333Serj struct adapter *adapter = ifp->if_softc; 169315333Serj struct tx_ring *txr = &adapter->tx_rings[kring->ring_id]; 170315333Serj int reclaim_tx; 171315333Serj 172315333Serj bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 173315333Serj BUS_DMASYNC_POSTREAD); 174315333Serj 175315333Serj /* 176315333Serj * First part: process new packets to send. 177315333Serj * nm_i is the current index in the netmap ring, 178315333Serj * nic_i is the corresponding index in the NIC ring. 179315333Serj * The two numbers differ because upon a *_init() we reset 180315333Serj * the NIC ring but leave the netmap ring unchanged. 181315333Serj * For the transmit ring, we have 182315333Serj * 183315333Serj * nm_i = kring->nr_hwcur 184315333Serj * nic_i = IXGBE_TDT (not tracked in the driver) 185315333Serj * and 186315333Serj * nm_i == (nic_i + kring->nkr_hwofs) % ring_size 187315333Serj * 188315333Serj * In this driver kring->nkr_hwofs >= 0, but for other 189315333Serj * drivers it might be negative as well. 
190315333Serj */ 191315333Serj 192315333Serj /* 193315333Serj * If we have packets to send (kring->nr_hwcur != kring->rhead) 194315333Serj * iterate over the netmap ring, fetch length and update 195315333Serj * the corresponding slot in the NIC ring. Some drivers also 196315333Serj * need to update the buffer's physical address in the NIC slot 197315333Serj * even NS_BUF_CHANGED is not set (PNMB computes the addresses). 198315333Serj * 199315333Serj * The netmap_reload_map() calls is especially expensive, 200315333Serj * even when (as in this case) the tag is 0, so do only 201315333Serj * when the buffer has actually changed. 202315333Serj * 203315333Serj * If possible do not set the report/intr bit on all slots, 204315333Serj * but only a few times per ring or when NS_REPORT is set. 205315333Serj * 206315333Serj * Finally, on 10G and faster drivers, it might be useful 207315333Serj * to prefetch the next slot and txr entry. 208315333Serj */ 209315333Serj 210315333Serj nm_i = kring->nr_hwcur; 211315333Serj if (nm_i != head) { /* we have new packets to send */ 212315333Serj nic_i = netmap_idx_k2n(kring, nm_i); 213315333Serj 214315333Serj __builtin_prefetch(&ring->slot[nm_i]); 215315333Serj __builtin_prefetch(&txr->tx_buffers[nic_i]); 216315333Serj 217315333Serj for (n = 0; nm_i != head; n++) { 218315333Serj struct netmap_slot *slot = &ring->slot[nm_i]; 219315333Serj u_int len = slot->len; 220315333Serj uint64_t paddr; 221315333Serj void *addr = PNMB(na, slot, &paddr); 222315333Serj 223315333Serj /* device-specific */ 224315333Serj union ixgbe_adv_tx_desc *curr = &txr->tx_base[nic_i]; 225315333Serj struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[nic_i]; 226315333Serj int flags = (slot->flags & NS_REPORT || 227315333Serj nic_i == 0 || nic_i == report_frequency) ? 
228315333Serj IXGBE_TXD_CMD_RS : 0; 229315333Serj 230315333Serj /* prefetch for next round */ 231315333Serj __builtin_prefetch(&ring->slot[nm_i + 1]); 232315333Serj __builtin_prefetch(&txr->tx_buffers[nic_i + 1]); 233315333Serj 234315333Serj NM_CHECK_ADDR_LEN(na, addr, len); 235315333Serj 236315333Serj if (slot->flags & NS_BUF_CHANGED) { 237315333Serj /* buffer has changed, reload map */ 238315333Serj netmap_reload_map(na, txr->txtag, txbuf->map, addr); 239315333Serj } 240315333Serj slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); 241315333Serj 242315333Serj /* Fill the slot in the NIC ring. */ 243315333Serj /* Use legacy descriptor, they are faster? */ 244315333Serj curr->read.buffer_addr = htole64(paddr); 245315333Serj curr->read.olinfo_status = 0; 246315333Serj curr->read.cmd_type_len = htole32(len | flags | 247315333Serj IXGBE_ADVTXD_DCMD_IFCS | IXGBE_TXD_CMD_EOP); 248315333Serj 249315333Serj /* make sure changes to the buffer are synced */ 250315333Serj bus_dmamap_sync(txr->txtag, txbuf->map, 251315333Serj BUS_DMASYNC_PREWRITE); 252315333Serj 253315333Serj nm_i = nm_next(nm_i, lim); 254315333Serj nic_i = nm_next(nic_i, lim); 255315333Serj } 256315333Serj kring->nr_hwcur = head; 257315333Serj 258315333Serj /* synchronize the NIC ring */ 259315333Serj bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 260315333Serj BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 261315333Serj 262315333Serj /* (re)start the tx unit up to slot nic_i (excluded) */ 263315333Serj IXGBE_WRITE_REG(&adapter->hw, txr->tail, nic_i); 264315333Serj } 265315333Serj 266315333Serj /* 267315333Serj * Second part: reclaim buffers for completed transmissions. 268315333Serj * Because this is expensive (we read a NIC register etc.) 269315333Serj * we only do it in specific cases (see below). 
270315333Serj */ 271315333Serj if (flags & NAF_FORCE_RECLAIM) { 272315333Serj reclaim_tx = 1; /* forced reclaim */ 273315333Serj } else if (!nm_kr_txempty(kring)) { 274315333Serj reclaim_tx = 0; /* have buffers, no reclaim */ 275315333Serj } else { 276315333Serj /* 277315333Serj * No buffers available. Locate previous slot with 278315333Serj * REPORT_STATUS set. 279315333Serj * If the slot has DD set, we can reclaim space, 280315333Serj * otherwise wait for the next interrupt. 281315333Serj * This enables interrupt moderation on the tx 282315333Serj * side though it might reduce throughput. 283315333Serj */ 284315333Serj struct ixgbe_legacy_tx_desc *txd = 285315333Serj (struct ixgbe_legacy_tx_desc *)txr->tx_base; 286315333Serj 287315333Serj nic_i = txr->next_to_clean + report_frequency; 288315333Serj if (nic_i > lim) 289315333Serj nic_i -= lim + 1; 290315333Serj // round to the closest with dd set 291315333Serj nic_i = (nic_i < kring->nkr_num_slots / 4 || 292315333Serj nic_i >= kring->nkr_num_slots*3/4) ? 293315333Serj 0 : report_frequency; 294315333Serj reclaim_tx = txd[nic_i].upper.fields.status & IXGBE_TXD_STAT_DD; // XXX cpu_to_le32 ? 295315333Serj } 296315333Serj if (reclaim_tx) { 297315333Serj /* 298315333Serj * Record completed transmissions. 299315333Serj * We (re)use the driver's txr->next_to_clean to keep 300315333Serj * track of the most recently completed transmission. 301315333Serj * 302315333Serj * The datasheet discourages the use of TDH to find 303315333Serj * out the number of sent packets, but we only set 304315333Serj * REPORT_STATUS in a few slots so TDH is the only 305315333Serj * good way. 306315333Serj */ 307315333Serj nic_i = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(kring->ring_id)); 308315333Serj if (nic_i >= kring->nkr_num_slots) { /* XXX can it happen ? 
*/ 309315333Serj D("TDH wrap %d", nic_i); 310315333Serj nic_i -= kring->nkr_num_slots; 311315333Serj } 312315333Serj if (nic_i != txr->next_to_clean) { 313315333Serj /* some tx completed, increment avail */ 314315333Serj txr->next_to_clean = nic_i; 315315333Serj kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim); 316315333Serj } 317315333Serj } 318315333Serj 319323830Smarius nm_txsync_finalize(kring); 320323830Smarius 321315333Serj return 0; 322315333Serj} 323315333Serj 324315333Serj 325315333Serj/* 326315333Serj * Reconcile kernel and user view of the receive ring. 327315333Serj * Same as for the txsync, this routine must be efficient. 328315333Serj * The caller guarantees a single invocations, but races against 329315333Serj * the rest of the driver should be handled here. 330315333Serj * 331315333Serj * On call, kring->rhead is the first packet that userspace wants 332315333Serj * to keep, and kring->rcur is the wakeup point. 333315333Serj * The kernel has previously reported packets up to kring->rtail. 334315333Serj * 335315333Serj * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective 336315333Serj * of whether or not we received an interrupt. 
337315333Serj */ 338315333Serjstatic int 339315333Serjixgbe_netmap_rxsync(struct netmap_kring *kring, int flags) 340315333Serj{ 341315333Serj struct netmap_adapter *na = kring->na; 342315333Serj struct ifnet *ifp = na->ifp; 343315333Serj struct netmap_ring *ring = kring->ring; 344315333Serj u_int nm_i; /* index into the netmap ring */ 345315333Serj u_int nic_i; /* index into the NIC ring */ 346315333Serj u_int n; 347315333Serj u_int const lim = kring->nkr_num_slots - 1; 348323830Smarius u_int const head = nm_rxsync_prologue(kring); 349315333Serj int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; 350315333Serj 351315333Serj /* device-specific */ 352315333Serj struct adapter *adapter = ifp->if_softc; 353315333Serj struct rx_ring *rxr = &adapter->rx_rings[kring->ring_id]; 354315333Serj 355315333Serj if (head > lim) 356315333Serj return netmap_ring_reinit(kring); 357315333Serj 358315333Serj /* XXX check sync modes */ 359315333Serj bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 360315333Serj BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 361315333Serj 362315333Serj /* 363315333Serj * First part: import newly received packets. 364315333Serj * 365315333Serj * nm_i is the index of the next free slot in the netmap ring, 366315333Serj * nic_i is the index of the next received packet in the NIC ring, 367315333Serj * and they may differ in case if_init() has been called while 368315333Serj * in netmap mode. For the receive ring we have 369315333Serj * 370315333Serj * nic_i = rxr->next_to_check; 371315333Serj * nm_i = kring->nr_hwtail (previous) 372315333Serj * and 373315333Serj * nm_i == (nic_i + kring->nkr_hwofs) % ring_size 374315333Serj * 375315333Serj * rxr->next_to_check is set to 0 on a ring reinit 376315333Serj */ 377315333Serj if (netmap_no_pendintr || force_update) { 378315333Serj int crclen = (ix_crcstrip) ? 
0 : 4; 379315333Serj uint16_t slot_flags = kring->nkr_slot_flags; 380315333Serj 381315333Serj nic_i = rxr->next_to_check; // or also k2n(kring->nr_hwtail) 382315333Serj nm_i = netmap_idx_n2k(kring, nic_i); 383315333Serj 384315333Serj for (n = 0; ; n++) { 385315333Serj union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i]; 386315333Serj uint32_t staterr = le32toh(curr->wb.upper.status_error); 387315333Serj 388315333Serj if ((staterr & IXGBE_RXD_STAT_DD) == 0) 389315333Serj break; 390315333Serj ring->slot[nm_i].len = le16toh(curr->wb.upper.length) - crclen; 391315333Serj ring->slot[nm_i].flags = slot_flags; 392315333Serj bus_dmamap_sync(rxr->ptag, 393315333Serj rxr->rx_buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD); 394315333Serj nm_i = nm_next(nm_i, lim); 395315333Serj nic_i = nm_next(nic_i, lim); 396315333Serj } 397315333Serj if (n) { /* update the state variables */ 398315333Serj if (netmap_no_pendintr && !force_update) { 399315333Serj /* diagnostics */ 400315333Serj ix_rx_miss ++; 401315333Serj ix_rx_miss_bufs += n; 402315333Serj } 403315333Serj rxr->next_to_check = nic_i; 404315333Serj kring->nr_hwtail = nm_i; 405315333Serj } 406315333Serj kring->nr_kflags &= ~NKR_PENDINTR; 407315333Serj } 408315333Serj 409315333Serj /* 410315333Serj * Second part: skip past packets that userspace has released. 411315333Serj * (kring->nr_hwcur to kring->rhead excluded), 412315333Serj * and make the buffers available for reception. 
413315333Serj * As usual nm_i is the index in the netmap ring, 414315333Serj * nic_i is the index in the NIC ring, and 415315333Serj * nm_i == (nic_i + kring->nkr_hwofs) % ring_size 416315333Serj */ 417315333Serj nm_i = kring->nr_hwcur; 418315333Serj if (nm_i != head) { 419315333Serj nic_i = netmap_idx_k2n(kring, nm_i); 420315333Serj for (n = 0; nm_i != head; n++) { 421315333Serj struct netmap_slot *slot = &ring->slot[nm_i]; 422315333Serj uint64_t paddr; 423315333Serj void *addr = PNMB(na, slot, &paddr); 424315333Serj 425315333Serj union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i]; 426315333Serj struct ixgbe_rx_buf *rxbuf = &rxr->rx_buffers[nic_i]; 427315333Serj 428315333Serj if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ 429315333Serj goto ring_reset; 430315333Serj 431315333Serj if (slot->flags & NS_BUF_CHANGED) { 432315333Serj /* buffer has changed, reload map */ 433315333Serj netmap_reload_map(na, rxr->ptag, rxbuf->pmap, addr); 434315333Serj slot->flags &= ~NS_BUF_CHANGED; 435315333Serj } 436315333Serj curr->wb.upper.status_error = 0; 437315333Serj curr->read.pkt_addr = htole64(paddr); 438315333Serj bus_dmamap_sync(rxr->ptag, rxbuf->pmap, 439315333Serj BUS_DMASYNC_PREREAD); 440315333Serj nm_i = nm_next(nm_i, lim); 441315333Serj nic_i = nm_next(nic_i, lim); 442315333Serj } 443315333Serj kring->nr_hwcur = head; 444315333Serj 445315333Serj bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 446315333Serj BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 447315333Serj /* 448315333Serj * IMPORTANT: we must leave one free slot in the ring, 449315333Serj * so move nic_i back by one unit 450315333Serj */ 451315333Serj nic_i = nm_prev(nic_i, lim); 452315333Serj IXGBE_WRITE_REG(&adapter->hw, rxr->tail, nic_i); 453315333Serj } 454315333Serj 455323830Smarius /* tell userspace that there might be new packets */ 456323830Smarius nm_rxsync_finalize(kring); 457323830Smarius 458315333Serj return 0; 459315333Serj 460315333Serjring_reset: 461315333Serj return 
netmap_ring_reinit(kring); 462315333Serj} 463315333Serj 464315333Serj 465315333Serj/* 466315333Serj * The attach routine, called near the end of ixgbe_attach(), 467315333Serj * fills the parameters for netmap_attach() and calls it. 468315333Serj * It cannot fail, in the worst case (such as no memory) 469315333Serj * netmap mode will be disabled and the driver will only 470315333Serj * operate in standard mode. 471315333Serj */ 472315333Serjvoid 473315333Serjixgbe_netmap_attach(struct adapter *adapter) 474315333Serj{ 475315333Serj struct netmap_adapter na; 476315333Serj 477315333Serj bzero(&na, sizeof(na)); 478315333Serj 479315333Serj na.ifp = adapter->ifp; 480315333Serj na.na_flags = NAF_BDG_MAYSLEEP; 481315333Serj na.num_tx_desc = adapter->num_tx_desc; 482315333Serj na.num_rx_desc = adapter->num_rx_desc; 483315333Serj na.nm_txsync = ixgbe_netmap_txsync; 484315333Serj na.nm_rxsync = ixgbe_netmap_rxsync; 485315333Serj na.nm_register = ixgbe_netmap_reg; 486315333Serj na.num_tx_rings = na.num_rx_rings = adapter->num_queues; 487315333Serj netmap_attach(&na); 488315333Serj} 489315333Serj 490315333Serj#endif /* DEV_NETMAP */ 491315333Serj 492315333Serj/* end of file */ 493