if_re_netmap.h revision 230055
1227614Sluigi/* 2227614Sluigi * Copyright (C) 2011 Luigi Rizzo. All rights reserved. 3227614Sluigi * 4227614Sluigi * Redistribution and use in source and binary forms, with or without 5227614Sluigi * modification, are permitted provided that the following conditions 6227614Sluigi * are met: 7227614Sluigi * 1. Redistributions of source code must retain the above copyright 8227614Sluigi * notice, this list of conditions and the following disclaimer. 9227614Sluigi * 2. Redistributions in binary form must reproduce the above copyright 10227614Sluigi * notice, this list of conditions and the following disclaimer in the 11227614Sluigi * documentation and/or other materials provided with the distribution. 12227614Sluigi * 13227614Sluigi * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14227614Sluigi * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15227614Sluigi * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16227614Sluigi * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17227614Sluigi * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18227614Sluigi * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19227614Sluigi * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20227614Sluigi * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21227614Sluigi * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22227614Sluigi * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23227614Sluigi * SUCH DAMAGE. 
 */

/*
 * $FreeBSD: head/sys/dev/netmap/if_re_netmap.h 230055 2012-01-13 11:01:23Z luigi $
 * $Id: if_re_netmap.h 10075 2011-12-25 22:55:48Z luigi $
 *
 * netmap support for if_re
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h> /* vtophys ? */
#include <dev/netmap/netmap_kern.h>

static int re_netmap_reg(struct ifnet *, int onoff);
static int re_netmap_txsync(void *, u_int, int);
static int re_netmap_rxsync(void *, u_int, int);
static void re_netmap_lock_wrapper(void *, int, u_int);

/*
 * Register this adapter with the netmap framework.
 * Fills a temporary netmap_adapter descriptor with the ring sizes
 * and the driver callbacks, then hands a copy to netmap_attach()
 * (which copies it, so the on-stack 'na' is fine).
 * Called once from the driver attach path.
 */
static void
re_netmap_attach(struct rl_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->rl_ifp;
	na.separate_locks = 0;	/* one core lock covers tx and rx */
	na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt;
	na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt;
	na.nm_txsync = re_netmap_txsync;
	na.nm_rxsync = re_netmap_rxsync;
	na.nm_lock = re_netmap_lock_wrapper;
	na.nm_register = re_netmap_reg;
	na.buff_size = NETMAP_BUF_SIZE;
	netmap_attach(&na, 1);	/* 1 tx/rx ring pair */
}


/*
 * wrapper to export locks to the generic netmap code.
 * This driver only has the single core lock (RL_LOCK), so the
 * per-queue tx/rx lock requests are invalid here and only logged.
 * 'queueid' is unused for the same reason.
 */
static void
re_netmap_lock_wrapper(void *_a, int what, u_int queueid)
{
	struct rl_softc *adapter = _a;

	switch (what) {
	case NETMAP_CORE_LOCK:
		RL_LOCK(adapter);
		break;
	case NETMAP_CORE_UNLOCK:
		RL_UNLOCK(adapter);
		break;

	case NETMAP_TX_LOCK:
	case NETMAP_RX_LOCK:
	case NETMAP_TX_UNLOCK:
	case NETMAP_RX_UNLOCK:
		D("invalid lock call %d, no tx/rx locks here", what);
		break;
	}
}


/*
 * Support for netmap register/unregister. We are already under core lock.
 * Only called on the first register or the last unregister.
 *
 * Stops the NIC, switches if_transmit and IFCAP_NETMAP according to
 * 'onoff', and restarts it via re_init_locked(). If the reinit in
 * netmap mode fails to bring the interface up, falls through to the
 * 'fail' label which restores normal (non-netmap) operation.
 * Returns 0 on success, EINVAL if netmap was never attached,
 * ENOMEM if the netmap-mode reinit failed.
 */
static int
re_netmap_reg(struct ifnet *ifp, int onoff)
{
	struct rl_softc *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	int error = 0;

	if (na == NULL)
		return EINVAL;
	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	re_stop(adapter);

	if (onoff) {
		ifp->if_capenable |= IFCAP_NETMAP;

		/* save if_transmit to restore it later */
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_start;

		re_init_locked(adapter);

		if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
			error = ENOMEM;
			goto fail;	/* revert to standard mode */
		}
	} else {
fail:
		/* restore if_transmit */
		ifp->if_transmit = na->if_transmit;
		ifp->if_capenable &= ~IFCAP_NETMAP;
		re_init_locked(adapter);	/* also enables intr */
	}
	return (error);
}

133227614Sluigi/* 134227614Sluigi * Reconcile kernel and user view of the transmit ring. 135227614Sluigi */ 136227614Sluigistatic int 137227614Sluigire_netmap_txsync(void *a, u_int ring_nr, int do_lock) 138227614Sluigi{ 139227614Sluigi struct rl_softc *sc = a; 140227614Sluigi struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc; 141227614Sluigi struct netmap_adapter *na = NA(sc->rl_ifp); 142227614Sluigi struct netmap_kring *kring = &na->tx_rings[ring_nr]; 143227614Sluigi struct netmap_ring *ring = kring->ring; 144228276Sluigi int j, k, l, n, lim = kring->nkr_num_slots - 1; 145227614Sluigi 146227614Sluigi k = ring->cur; 147228276Sluigi if (k > lim) 148227614Sluigi return netmap_ring_reinit(kring); 149227614Sluigi 150227614Sluigi if (do_lock) 151227614Sluigi RL_LOCK(sc); 152227614Sluigi 153227614Sluigi /* Sync the TX descriptor list */ 154227614Sluigi bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 155227614Sluigi sc->rl_ldata.rl_tx_list_map, 156227614Sluigi BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 157227614Sluigi 158228276Sluigi /* XXX move after the transmissions */ 159227614Sluigi /* record completed transmissions */ 160228276Sluigi for (n = 0, l = sc->rl_ldata.rl_tx_considx; 161228276Sluigi l != sc->rl_ldata.rl_tx_prodidx; 162228276Sluigi n++, l = RL_TX_DESC_NXT(sc, l)) { 163227614Sluigi uint32_t cmdstat = 164228276Sluigi le32toh(sc->rl_ldata.rl_tx_list[l].rl_cmdstat); 165227614Sluigi if (cmdstat & RL_TDESC_STAT_OWN) 166227614Sluigi break; 167227614Sluigi } 168227614Sluigi if (n > 0) { 169228276Sluigi sc->rl_ldata.rl_tx_considx = l; 170227614Sluigi sc->rl_ldata.rl_tx_free += n; 171227614Sluigi kring->nr_hwavail += n; 172227614Sluigi } 173227614Sluigi 174227614Sluigi /* update avail to what the hardware knows */ 175227614Sluigi ring->avail = kring->nr_hwavail; 176227614Sluigi 177228276Sluigi j = kring->nr_hwcur; 178227614Sluigi if (j != k) { /* we have new packets to send */ 179227614Sluigi n = 0; 180228276Sluigi l = sc->rl_ldata.rl_tx_prodidx; 181227614Sluigi while 
(j != k) { 182227614Sluigi struct netmap_slot *slot = &ring->slot[j]; 183228276Sluigi struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[l]; 184227614Sluigi int cmd = slot->len | RL_TDESC_CMD_EOF | 185227614Sluigi RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF ; 186229939Sluigi uint64_t paddr; 187229939Sluigi void *addr = PNMB(slot, &paddr); 188227614Sluigi int len = slot->len; 189227614Sluigi 190227614Sluigi if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { 191227614Sluigi if (do_lock) 192227614Sluigi RL_UNLOCK(sc); 193228276Sluigi // XXX what about prodidx ? 194227614Sluigi return netmap_ring_reinit(kring); 195227614Sluigi } 196227614Sluigi 197228276Sluigi if (l == lim) /* mark end of ring */ 198227614Sluigi cmd |= RL_TDESC_CMD_EOR; 199227614Sluigi 200227614Sluigi if (slot->flags & NS_BUF_CHANGED) { 201227614Sluigi desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); 202227614Sluigi desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr)); 203227614Sluigi /* buffer has changed, unload and reload map */ 204227614Sluigi netmap_reload_map(sc->rl_ldata.rl_tx_mtag, 205229939Sluigi txd[l].tx_dmamap, addr); 206227614Sluigi slot->flags &= ~NS_BUF_CHANGED; 207227614Sluigi } 208227614Sluigi slot->flags &= ~NS_REPORT; 209227614Sluigi desc->rl_cmdstat = htole32(cmd); 210227614Sluigi bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 211228276Sluigi txd[l].tx_dmamap, BUS_DMASYNC_PREWRITE); 212227614Sluigi j = (j == lim) ? 0 : j + 1; 213228276Sluigi l = (l == lim) ? 0 : l + 1; 214227614Sluigi n++; 215227614Sluigi } 216228276Sluigi sc->rl_ldata.rl_tx_prodidx = l; 217228276Sluigi kring->nr_hwcur = k; 218227614Sluigi 219227614Sluigi /* decrease avail by number of sent packets */ 220227614Sluigi ring->avail -= n; 221227614Sluigi kring->nr_hwavail = ring->avail; 222227614Sluigi 223227614Sluigi bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 224227614Sluigi sc->rl_ldata.rl_tx_list_map, 225227614Sluigi BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 226227614Sluigi 227227614Sluigi /* start ? 
*/ 228227614Sluigi CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 229227614Sluigi } 230227614Sluigi if (do_lock) 231227614Sluigi RL_UNLOCK(sc); 232227614Sluigi return 0; 233227614Sluigi} 234227614Sluigi 235227614Sluigi 236227614Sluigi/* 237227614Sluigi * Reconcile kernel and user view of the receive ring. 238227614Sluigi */ 239227614Sluigistatic int 240227614Sluigire_netmap_rxsync(void *a, u_int ring_nr, int do_lock) 241227614Sluigi{ 242227614Sluigi struct rl_softc *sc = a; 243227614Sluigi struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc; 244227614Sluigi struct netmap_adapter *na = NA(sc->rl_ifp); 245227614Sluigi struct netmap_kring *kring = &na->rx_rings[ring_nr]; 246227614Sluigi struct netmap_ring *ring = kring->ring; 247228276Sluigi int j, k, l, n, lim = kring->nkr_num_slots - 1; 248227614Sluigi 249227614Sluigi k = ring->cur; 250228276Sluigi if (k > lim) 251227614Sluigi return netmap_ring_reinit(kring); 252227614Sluigi 253227614Sluigi if (do_lock) 254227614Sluigi RL_LOCK(sc); 255227614Sluigi /* XXX check sync modes */ 256227614Sluigi bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 257227614Sluigi sc->rl_ldata.rl_rx_list_map, 258227614Sluigi BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 259227614Sluigi 260227614Sluigi /* 261227614Sluigi * The device uses all the buffers in the ring, so we need 262227614Sluigi * another termination condition in addition to RL_RDESC_STAT_OWN 263227614Sluigi * cleared (all buffers could have it cleared. 
The easiest one 264227614Sluigi * is to limit the amount of data reported up to 'lim' 265227614Sluigi */ 266228276Sluigi l = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */ 267228276Sluigi j = l + kring->nkr_hwofs; 268227614Sluigi for (n = kring->nr_hwavail; n < lim ; n++) { 269228276Sluigi struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[l]; 270227614Sluigi uint32_t rxstat = le32toh(cur_rx->rl_cmdstat); 271227614Sluigi uint32_t total_len; 272227614Sluigi 273227614Sluigi if ((rxstat & RL_RDESC_STAT_OWN) != 0) 274227614Sluigi break; 275227614Sluigi total_len = rxstat & sc->rl_rxlenmask; 276227614Sluigi /* XXX subtract crc */ 277227614Sluigi total_len = (total_len < 4) ? 0 : total_len - 4; 278227614Sluigi kring->ring->slot[j].len = total_len; 279227614Sluigi /* sync was in re_newbuf() */ 280227614Sluigi bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 281228276Sluigi rxd[l].rx_dmamap, BUS_DMASYNC_POSTREAD); 282228276Sluigi j = (j == lim) ? 0 : j + 1; 283228276Sluigi l = (l == lim) ? 0 : l + 1; 284227614Sluigi } 285227614Sluigi if (n != kring->nr_hwavail) { 286228276Sluigi sc->rl_ldata.rl_rx_prodidx = l; 287227614Sluigi sc->rl_ifp->if_ipackets += n - kring->nr_hwavail; 288227614Sluigi kring->nr_hwavail = n; 289227614Sluigi } 290227614Sluigi 291227614Sluigi /* skip past packets that userspace has already processed, 292227614Sluigi * making them available for reception. 293227614Sluigi * advance nr_hwcur and issue a bus_dmamap_sync on the 294227614Sluigi * buffers so it is safe to write to them. 295227614Sluigi * Also increase nr_hwavail 296227614Sluigi */ 297227614Sluigi j = kring->nr_hwcur; 298227614Sluigi if (j != k) { /* userspace has read some packets. 
*/ 299227614Sluigi n = 0; 300228276Sluigi l = kring->nr_hwcur - kring->nkr_hwofs; 301228276Sluigi if (l < 0) 302228276Sluigi l += lim + 1; 303227614Sluigi while (j != k) { 304227614Sluigi struct netmap_slot *slot = ring->slot + j; 305228276Sluigi struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[l]; 306227614Sluigi int cmd = na->buff_size | RL_RDESC_CMD_OWN; 307229939Sluigi uint64_t paddr; 308229939Sluigi void *addr = PNMB(slot, &paddr); 309227614Sluigi 310227614Sluigi if (addr == netmap_buffer_base) { /* bad buf */ 311227614Sluigi if (do_lock) 312227614Sluigi RL_UNLOCK(sc); 313227614Sluigi return netmap_ring_reinit(kring); 314227614Sluigi } 315227614Sluigi 316228276Sluigi if (l == lim) /* mark end of ring */ 317227614Sluigi cmd |= RL_RDESC_CMD_EOR; 318227614Sluigi 319227614Sluigi desc->rl_cmdstat = htole32(cmd); 320227614Sluigi slot->flags &= ~NS_REPORT; 321227614Sluigi if (slot->flags & NS_BUF_CHANGED) { 322227614Sluigi desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); 323227614Sluigi desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr)); 324227614Sluigi netmap_reload_map(sc->rl_ldata.rl_rx_mtag, 325229939Sluigi rxd[l].rx_dmamap, addr); 326227614Sluigi slot->flags &= ~NS_BUF_CHANGED; 327227614Sluigi } 328227614Sluigi bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 329228276Sluigi rxd[l].rx_dmamap, BUS_DMASYNC_PREREAD); 330227614Sluigi j = (j == lim) ? 0 : j + 1; 331228276Sluigi l = (l == lim) ? 
0 : l + 1; 332227614Sluigi n++; 333227614Sluigi } 334227614Sluigi kring->nr_hwavail -= n; 335227614Sluigi kring->nr_hwcur = k; 336227614Sluigi /* Flush the RX DMA ring */ 337227614Sluigi 338227614Sluigi bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 339227614Sluigi sc->rl_ldata.rl_rx_list_map, 340227614Sluigi BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 341227614Sluigi } 342227614Sluigi /* tell userspace that there are new packets */ 343228276Sluigi ring->avail = kring->nr_hwavail; 344227614Sluigi if (do_lock) 345227614Sluigi RL_UNLOCK(sc); 346227614Sluigi return 0; 347227614Sluigi} 348227614Sluigi 349228276Sluigi/* 350228276Sluigi * Additional routines to init the tx and rx rings. 351228276Sluigi * In other drivers we do that inline in the main code. 352228276Sluigi */ 353227614Sluigistatic void 354227614Sluigire_netmap_tx_init(struct rl_softc *sc) 355227614Sluigi{ 356227614Sluigi struct rl_txdesc *txd; 357227614Sluigi struct rl_desc *desc; 358228276Sluigi int i, n; 359227614Sluigi struct netmap_adapter *na = NA(sc->rl_ifp); 360227614Sluigi struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0); 361227614Sluigi 362227614Sluigi /* slot is NULL if we are not in netmap mode */ 363227614Sluigi if (!slot) 364227614Sluigi return; 365227614Sluigi /* in netmap mode, overwrite addresses and maps */ 366227614Sluigi txd = sc->rl_ldata.rl_tx_desc; 367227614Sluigi desc = sc->rl_ldata.rl_tx_list; 368228276Sluigi n = sc->rl_ldata.rl_tx_desc_cnt; 369227614Sluigi 370228276Sluigi /* l points in the netmap ring, i points in the NIC ring */ 371228276Sluigi for (i = 0; i < n; i++) { 372228276Sluigi void *addr; 373228276Sluigi uint64_t paddr; 374228276Sluigi struct netmap_kring *kring = &na->tx_rings[0]; 375228276Sluigi int l = i + kring->nkr_hwofs; 376227614Sluigi 377228276Sluigi if (l >= n) 378228276Sluigi l -= n; 379228276Sluigi 380229939Sluigi addr = PNMB(slot + l, &paddr); 381227614Sluigi desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); 382227614Sluigi desc[i].rl_bufaddr_hi = 
htole32(RL_ADDR_HI(paddr)); 383227614Sluigi netmap_load_map(sc->rl_ldata.rl_tx_mtag, 384229939Sluigi txd[i].tx_dmamap, addr); 385227614Sluigi } 386227614Sluigi} 387227614Sluigi 388227614Sluigistatic void 389227614Sluigire_netmap_rx_init(struct rl_softc *sc) 390227614Sluigi{ 391227614Sluigi struct netmap_adapter *na = NA(sc->rl_ifp); 392227614Sluigi struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0); 393227614Sluigi struct rl_desc *desc = sc->rl_ldata.rl_rx_list; 394227614Sluigi uint32_t cmdstat; 395228276Sluigi int i, n; 396227614Sluigi 397227614Sluigi if (!slot) 398227614Sluigi return; 399228276Sluigi n = sc->rl_ldata.rl_rx_desc_cnt; 400228276Sluigi for (i = 0; i < n; i++) { 401228276Sluigi void *addr; 402228276Sluigi uint64_t paddr; 403228276Sluigi struct netmap_kring *kring = &na->rx_rings[0]; 404228276Sluigi int l = i + kring->nkr_hwofs; 405227614Sluigi 406228276Sluigi if (l >= n) 407228276Sluigi l -= n; 408227614Sluigi 409229939Sluigi addr = PNMB(slot + l, &paddr); 410229939Sluigi 411229939Sluigi netmap_reload_map(sc->rl_ldata.rl_rx_mtag, 412230055Sluigi sc->rl_ldata.rl_rx_desc[i].rx_dmamap, addr); 413229939Sluigi bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 414229939Sluigi sc->rl_ldata.rl_rx_desc[i].rx_dmamap, BUS_DMASYNC_PREREAD); 415227614Sluigi desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); 416227614Sluigi desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr)); 417228276Sluigi cmdstat = na->buff_size; 418228276Sluigi if (i == n - 1) 419227614Sluigi cmdstat |= RL_RDESC_CMD_EOR; 420228276Sluigi /* 421228276Sluigi * userspace knows that hwavail packets were ready before the 422228276Sluigi * reset, so we need to tell the NIC that last hwavail 423228276Sluigi * descriptors of the ring are still owned by the driver. 424228276Sluigi */ 425228276Sluigi if (i < n - 1 - kring->nr_hwavail) // XXX + 1 ? 426228276Sluigi cmdstat |= RL_RDESC_CMD_OWN; 427228276Sluigi desc[i].rl_cmdstat = htole32(cmdstat); 428227614Sluigi } 429227614Sluigi} 430