/* if_igb_netmap.h revision 231881 */
1114902Sscottl/* 2114902Sscottl * Copyright (C) 2011 Universita` di Pisa. All rights reserved. 3114902Sscottl * 4114902Sscottl * Redistribution and use in source and binary forms, with or without 5114902Sscottl * modification, are permitted provided that the following conditions 6114902Sscottl * are met: 7114902Sscottl * 1. Redistributions of source code must retain the above copyright 8114902Sscottl * notice, this list of conditions and the following disclaimer. 9114902Sscottl * 2. Redistributions in binary form must reproduce the above copyright 10114902Sscottl * notice, this list of conditions and the following disclaimer in the 11114902Sscottl * documentation and/or other materials provided with the distribution. 12114902Sscottl * 13114902Sscottl * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14114902Sscottl * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15114902Sscottl * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16114902Sscottl * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17114902Sscottl * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18114902Sscottl * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19114902Sscottl * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20114902Sscottl * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21114902Sscottl * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22114902Sscottl * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23114902Sscottl * SUCH DAMAGE. 
24114902Sscottl */ 25114902Sscottl 26114902Sscottl/* 27114902Sscottl * $FreeBSD: head/sys/dev/netmap/if_igb_netmap.h 231881 2012-02-17 14:09:04Z luigi $ 28114902Sscottl * $Id: if_igb_netmap.h 9802 2011-12-02 18:42:37Z luigi $ 29119418Sobrien * 30119418Sobrien * netmap modifications for igb contributed by Ahmed Kooli 31114902Sscottl */ 32152919Sscottl 33114902Sscottl#include <net/netmap.h> 34119418Sobrien#include <sys/selinfo.h> 35152919Sscottl#include <vm/vm.h> 36152919Sscottl#include <vm/pmap.h> /* vtophys ? */ 37152919Sscottl#include <dev/netmap/netmap_kern.h> 38114902Sscottl 39119997Spsstatic int igb_netmap_reg(struct ifnet *, int onoff); 40114902Sscottlstatic int igb_netmap_txsync(struct ifnet *, u_int, int); 41114902Sscottlstatic int igb_netmap_rxsync(struct ifnet *, u_int, int); 42114902Sscottlstatic void igb_netmap_lock_wrapper(struct ifnet *, int, u_int); 43114902Sscottl 44114902Sscottl 45114902Sscottlstatic void 46114902Sscottligb_netmap_attach(struct adapter *adapter) 47143160Simp{ 48114902Sscottl struct netmap_adapter na; 49114902Sscottl 50114902Sscottl bzero(&na, sizeof(na)); 51143160Simp 52127205Sscottl na.ifp = adapter->ifp; 53127205Sscottl na.separate_locks = 1; 54127205Sscottl na.num_tx_desc = adapter->num_tx_desc; 55143160Simp na.num_rx_desc = adapter->num_rx_desc; 56127205Sscottl na.nm_txsync = igb_netmap_txsync; 57114902Sscottl na.nm_rxsync = igb_netmap_rxsync; 58114902Sscottl na.nm_lock = igb_netmap_lock_wrapper; 59114902Sscottl na.nm_register = igb_netmap_reg; 60114902Sscottl netmap_attach(&na, adapter->num_queues); 61114902Sscottl} 62114902Sscottl 63114902Sscottl 64114902Sscottl/* 65116852Sscottl * wrapper to export locks to the generic code 66117167Sjhb */ 67116852Sscottlstatic void 68116852Sscottligb_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid) 69116852Sscottl{ 70116852Sscottl struct adapter *adapter = ifp->if_softc; 71114902Sscottl 72114902Sscottl ASSERT(queueid < adapter->num_queues); 73114902Sscottl switch (what) { 
74114902Sscottl case NETMAP_CORE_LOCK: 75114902Sscottl IGB_CORE_LOCK(adapter); 76114902Sscottl break; 77114902Sscottl case NETMAP_CORE_UNLOCK: 78114902Sscottl IGB_CORE_UNLOCK(adapter); 79114902Sscottl break; 80114902Sscottl case NETMAP_TX_LOCK: 81114902Sscottl IGB_TX_LOCK(&adapter->tx_rings[queueid]); 82114902Sscottl break; 83114902Sscottl case NETMAP_TX_UNLOCK: 84141062Sscottl IGB_TX_UNLOCK(&adapter->tx_rings[queueid]); 85114902Sscottl break; 86114902Sscottl case NETMAP_RX_LOCK: 87114902Sscottl IGB_RX_LOCK(&adapter->rx_rings[queueid]); 88114902Sscottl break; 89141062Sscottl case NETMAP_RX_UNLOCK: 90127205Sscottl IGB_RX_UNLOCK(&adapter->rx_rings[queueid]); 91127205Sscottl break; 92127205Sscottl } 93127205Sscottl} 94141062Sscottl 95114902Sscottl 96114902Sscottl/* 97114902Sscottl * register-unregister routine 98114902Sscottl */ 99114902Sscottlstatic int 100114902Sscottligb_netmap_reg(struct ifnet *ifp, int onoff) 101114902Sscottl{ 102114902Sscottl struct adapter *adapter = ifp->if_softc; 103114902Sscottl struct netmap_adapter *na = NA(ifp); 104114902Sscottl int error = 0; 105127205Sscottl 106127205Sscottl if (na == NULL) 107114902Sscottl return EINVAL; /* no netmap support here */ 108127205Sscottl 109114902Sscottl igb_disable_intr(adapter); 110127135Snjl 111127135Snjl /* Tell the stack that the interface is no longer active */ 112114902Sscottl ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 113114902Sscottl 114114902Sscottl if (onoff) { 115119690Sjhb ifp->if_capenable |= IFCAP_NETMAP; 116114902Sscottl 117127135Snjl na->if_transmit = ifp->if_transmit; 118127135Snjl ifp->if_transmit = netmap_start; 119114902Sscottl 120114902Sscottl igb_init_locked(adapter); 121114902Sscottl if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) { 122114902Sscottl error = ENOMEM; 123114902Sscottl goto fail; 124114902Sscottl } 125114902Sscottl } else { 126114902Sscottlfail: 127114902Sscottl /* restore if_transmit */ 128127135Snjl ifp->if_transmit = 
na->if_transmit; 129127135Snjl ifp->if_capenable &= ~IFCAP_NETMAP; 130114902Sscottl igb_init_locked(adapter); /* also enable intr */ 131114902Sscottl } 132114902Sscottl return (error); 133166901Spiso} 134166901Spiso 135114902Sscottl 136114902Sscottl/* 137114902Sscottl * Reconcile hardware and user view of the transmit ring. 138114902Sscottl */ 139114902Sscottlstatic int 140114902Sscottligb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock) 141114902Sscottl{ 142114902Sscottl struct adapter *adapter = ifp->if_softc; 143114902Sscottl struct tx_ring *txr = &adapter->tx_rings[ring_nr]; 144114902Sscottl struct netmap_adapter *na = NA(adapter->ifp); 145114902Sscottl struct netmap_kring *kring = &na->tx_rings[ring_nr]; 146114902Sscottl struct netmap_ring *ring = kring->ring; 147114902Sscottl int j, k, l, n = 0, lim = kring->nkr_num_slots - 1; 148114902Sscottl 149140923Sscottl /* generate an interrupt approximately every half ring */ 150140923Sscottl int report_frequency = kring->nkr_num_slots >> 1; 151114902Sscottl 152114902Sscottl k = ring->cur; 153114902Sscottl if (k > lim) 154114902Sscottl return netmap_ring_reinit(kring); 155119997Sps 156119997Sps if (do_lock) 157126364Sscottl IGB_TX_LOCK(txr); 158140923Sscottl bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 159126364Sscottl BUS_DMASYNC_POSTREAD); 160119997Sps 161119997Sps /* check for new packets to send. 162114902Sscottl * j indexes the netmap ring, l indexes the nic ring, and 163119997Sps * j = kring->nr_hwcur, l = E1000_TDT (not tracked), 164114902Sscottl * j == (l + kring->nkr_hwofs) % ring_size 165114902Sscottl */ 166114902Sscottl j = kring->nr_hwcur; 167114902Sscottl if (j != k) { /* we have packets to send */ 168114902Sscottl /* 82575 needs the queue index added */ 169114902Sscottl u32 olinfo_status = 170119997Sps (adapter->hw.mac.type == e1000_82575) ? 
(txr->me << 4) : 0; 171119997Sps 172119997Sps l = netmap_tidx_k2n(na, ring_nr, j); 173119997Sps for (n = 0; j != k; n++) { 174119997Sps struct netmap_slot *slot = &ring->slot[j]; 175119997Sps union e1000_adv_tx_desc *curr = 176119997Sps (union e1000_adv_tx_desc *)&txr->tx_base[l]; 177119997Sps struct igb_tx_buffer *txbuf = &txr->tx_buffers[l]; 178119997Sps int flags = ((slot->flags & NS_REPORT) || 179119997Sps j == 0 || j == report_frequency) ? 180119997Sps E1000_ADVTXD_DCMD_RS : 0; 181119997Sps uint64_t paddr; 182114902Sscottl void *addr = PNMB(slot, &paddr); 183114902Sscottl int len = slot->len; 184114902Sscottl 185114902Sscottl if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { 186114902Sscottl if (do_lock) 187114902Sscottl IGB_TX_UNLOCK(txr); 188114902Sscottl return netmap_ring_reinit(kring); 189114902Sscottl } 190114902Sscottl 191114902Sscottl slot->flags &= ~NS_REPORT; 192116852Sscottl // XXX set the address unconditionally 193140923Sscottl curr->read.buffer_addr = htole64(paddr); 194140923Sscottl curr->read.olinfo_status = 195114902Sscottl htole32(olinfo_status | 196114902Sscottl (len<< E1000_ADVTXD_PAYLEN_SHIFT)); 197114902Sscottl curr->read.cmd_type_len = 198114902Sscottl htole32(len | E1000_ADVTXD_DTYP_DATA | 199114902Sscottl E1000_ADVTXD_DCMD_IFCS | 200114902Sscottl E1000_ADVTXD_DCMD_DEXT | 201114902Sscottl E1000_ADVTXD_DCMD_EOP | flags); 202114902Sscottl if (slot->flags & NS_BUF_CHANGED) { 203116852Sscottl /* buffer has changed, reload map */ 204116852Sscottl netmap_reload_map(txr->txtag, txbuf->map, addr); 205116852Sscottl slot->flags &= ~NS_BUF_CHANGED; 206116852Sscottl } 207116852Sscottl 208116852Sscottl bus_dmamap_sync(txr->txtag, txbuf->map, 209126364Sscottl BUS_DMASYNC_PREWRITE); 210116852Sscottl j = (j == lim) ? 0 : j + 1; 211114902Sscottl l = (l == lim) ? 
0 : l + 1; 212114902Sscottl } 213114902Sscottl kring->nr_hwcur = k; 214114902Sscottl 215114902Sscottl /* decrease avail by number of sent packets */ 216114902Sscottl kring->nr_hwavail -= n; 217116852Sscottl 218116852Sscottl /* Set the watchdog XXX ? */ 219116852Sscottl txr->queue_status = IGB_QUEUE_WORKING; 220114902Sscottl txr->watchdog_time = ticks; 221114902Sscottl 222114902Sscottl bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 223114902Sscottl BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 224114902Sscottl 225114902Sscottl E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), l); 226114902Sscottl } 227114902Sscottl 228114902Sscottl if (n == 0 || kring->nr_hwavail < 1) { 229114902Sscottl int delta; 230114902Sscottl 231114902Sscottl /* record completed transmissions using TDH */ 232114902Sscottl l = E1000_READ_REG(&adapter->hw, E1000_TDH(ring_nr)); 233114902Sscottl if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */ 234114902Sscottl D("TDH wrap %d", l); 235114902Sscottl l -= kring->nkr_num_slots; 236114902Sscottl } 237114902Sscottl delta = l - txr->next_to_clean; 238114902Sscottl if (delta) { 239 /* some completed, increment hwavail. */ 240 if (delta < 0) 241 delta += kring->nkr_num_slots; 242 txr->next_to_clean = l; 243 kring->nr_hwavail += delta; 244 } 245 } 246 /* update avail to what the hardware knows */ 247 ring->avail = kring->nr_hwavail; 248 249 if (do_lock) 250 IGB_TX_UNLOCK(txr); 251 return 0; 252} 253 254 255/* 256 * Reconcile kernel and user view of the receive ring. 
257 */ 258static int 259igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock) 260{ 261 struct adapter *adapter = ifp->if_softc; 262 struct rx_ring *rxr = &adapter->rx_rings[ring_nr]; 263 struct netmap_adapter *na = NA(adapter->ifp); 264 struct netmap_kring *kring = &na->rx_rings[ring_nr]; 265 struct netmap_ring *ring = kring->ring; 266 int j, k, l, n, lim = kring->nkr_num_slots - 1; 267 268 k = ring->cur; 269 if (k > lim) 270 return netmap_ring_reinit(kring); 271 272 if (do_lock) 273 IGB_RX_LOCK(rxr); 274 275 /* XXX check sync modes */ 276 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 277 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 278 279 /* import newly received packets into the netmap ring. 280 * j is an index in the netmap ring, l in the NIC ring, and 281 * j = (kring->nr_hwcur + kring->nr_hwavail) % ring_size 282 * l = rxr->next_to_check; 283 * and 284 * j == (l + kring->nkr_hwofs) % ring_size 285 */ 286 l = rxr->next_to_check; 287 j = netmap_ridx_n2k(na, ring_nr, l); 288 for (n = 0; ; n++) { 289 union e1000_adv_rx_desc *curr = &rxr->rx_base[l]; 290 uint32_t staterr = le32toh(curr->wb.upper.status_error); 291 292 if ((staterr & E1000_RXD_STAT_DD) == 0) 293 break; 294 ring->slot[j].len = le16toh(curr->wb.upper.length); 295 bus_dmamap_sync(rxr->ptag, 296 rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD); 297 j = (j == lim) ? 0 : j + 1; 298 l = (l == lim) ? 0 : l + 1; 299 } 300 if (n) { 301 rxr->next_to_check = l; 302 kring->nr_hwavail += n; 303 } 304 305 /* skip past packets that userspace has already processed */ 306 j = kring->nr_hwcur; 307 if (j != k) { /* userspace has read some packets. 
*/ 308 l = netmap_ridx_k2n(na, ring_nr, j); 309 for (n = 0; j != k; n++) { 310 struct netmap_slot *slot = ring->slot + j; 311 union e1000_adv_rx_desc *curr = &rxr->rx_base[l]; 312 struct igb_rx_buf *rxbuf = rxr->rx_buffers + l; 313 uint64_t paddr; 314 void *addr = PNMB(slot, &paddr); 315 316 if (addr == netmap_buffer_base) { /* bad buf */ 317 if (do_lock) 318 IGB_RX_UNLOCK(rxr); 319 return netmap_ring_reinit(kring); 320 } 321 322 curr->wb.upper.status_error = 0; 323 curr->read.pkt_addr = htole64(paddr); 324 if (slot->flags & NS_BUF_CHANGED) { 325 netmap_reload_map(rxr->ptag, rxbuf->pmap, addr); 326 slot->flags &= ~NS_BUF_CHANGED; 327 } 328 329 bus_dmamap_sync(rxr->ptag, rxbuf->pmap, 330 BUS_DMASYNC_PREREAD); 331 332 j = (j == lim) ? 0 : j + 1; 333 l = (l == lim) ? 0 : l + 1; 334 } 335 kring->nr_hwavail -= n; 336 kring->nr_hwcur = k; 337 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 338 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 339 /* 340 * IMPORTANT: we must leave one free slot in the ring, 341 * so move l back by one unit 342 */ 343 l = (l == 0) ? lim : l - 1; 344 E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), l); 345 } 346 /* tell userspace that there are new packets */ 347 ring->avail = kring->nr_hwavail ; 348 if (do_lock) 349 IGB_RX_UNLOCK(rxr); 350 return 0; 351} 352/* end of file */ 353