/*
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: stable/11/sys/dev/netmap/if_em_netmap.h 343771 2019-02-05 10:33:22Z vmaffione $
 *
 * netmap support for: em.
 *
 * For more details on netmap support please see ixgbe_netmap.h
 */


#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>


// XXX do we need to block/unblock the tasks ?
static void
em_netmap_block_tasks(struct adapter *adapter)
{
	if (adapter->msix > 1) {	/* MSIX */
		int i;
		struct tx_ring *txr = adapter->tx_rings;
		struct rx_ring *rxr = adapter->rx_rings;

		for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
			taskqueue_block(txr->tq);
			taskqueue_drain(txr->tq, &txr->tx_task);
			taskqueue_block(rxr->tq);
			taskqueue_drain(rxr->tq, &rxr->rx_task);
		}
	} else {	/* legacy */
		taskqueue_block(adapter->tq);
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->que_task);
	}
}


static void
em_netmap_unblock_tasks(struct adapter *adapter)
{
	if (adapter->msix > 1) {
		struct tx_ring *txr = adapter->tx_rings;
		struct rx_ring *rxr = adapter->rx_rings;
		int i;

		for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
			taskqueue_unblock(txr->tq);
			taskqueue_unblock(rxr->tq);
		}
	} else {	/* legacy */
		taskqueue_unblock(adapter->tq);
	}
}


/*
 * Register/unregister. We are already under netmap lock.
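 *
 * netmap invokes this callback when an application attaches to or
 * detaches from the port: a userspace nm_open("netmap:em0", ...)
 * eventually reaches this function with onoff == 1, and the matching
 * nm_close() with onoff == 0 ("em0" here is just an example name).
 * Either way the interface is quiesced, the native/netmap callbacks
 * are swapped via the nm_*_native_flags() helpers, and
 * em_init_locked() then brings the rings back up in the new mode.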
 */
static int
em_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct adapter *adapter = ifp->if_softc;

	EM_CORE_LOCK(adapter);
	em_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	em_netmap_block_tasks(adapter);
	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	em_init_locked(adapter);	/* also enable intr */
	em_netmap_unblock_tasks(adapter);
	EM_CORE_UNLOCK(adapter);
	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}


/*
 * Reconcile kernel and user view of the transmit ring.
 */
static int
em_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	/* generate an interrupt approximately every half ring */
	u_int report_frequency = kring->nkr_num_slots >> 1;

	/* device-specific */
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = &adapter->tx_rings[kring->ring_id];

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * First part: process new packets to send.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			/* device-specific */
			struct e1000_tx_desc *curr = &txr->tx_base[nic_i];
			struct em_txbuffer *txbuf = &txr->tx_buffers[nic_i];
			int flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				E1000_TXD_CMD_RS : 0;

			NM_CHECK_ADDR_LEN(na, addr, len);

			if (slot->flags & NS_BUF_CHANGED) {
				curr->buffer_addr = htole64(paddr);
				/* buffer has changed, reload map */
				netmap_reload_map(na, txr->txtag, txbuf->map, addr);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

			/* Fill the slot in the NIC ring. */
			curr->upper.data = 0;
			curr->lower.data = htole32(adapter->txd_cmd | len |
				(E1000_TXD_CMD_EOP | flags) );
			bus_dmamap_sync(txr->txtag, txbuf->map,
				BUS_DMASYNC_PREWRITE);

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
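	 *
	 * Rather than scanning descriptor status bits, read the TDH
	 * (transmit descriptor head) register: the NIC has completed
	 * every descriptor before the slot TDH points at, so hwtail
	 * can be advanced to just before it. Doing this only on
	 * NAF_FORCE_RECLAIM, or when the ring appears empty, keeps
	 * the relatively expensive register read off the common path.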
	 */
	if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		/* record completed transmissions using TDH */
		nic_i = E1000_READ_REG(&adapter->hw, E1000_TDH(kring->ring_id));
		if (unlikely(nic_i >= kring->nkr_num_slots)) {
			nm_prerr("TDH wrap at idx %d", nic_i);
			nic_i -= kring->nkr_num_slots;
		}
		if (nic_i != txr->next_to_clean) {
			txr->next_to_clean = nic_i;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}

	return 0;
}


/*
 * Reconcile kernel and user view of the receive ring.
 */
static int
em_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* device-specific */
	struct adapter *adapter = ifp->if_softc;
	struct rx_ring *rxr = &adapter->rx_rings[kring->ring_id];

	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 */
	if (netmap_no_pendintr || force_update) {
		nic_i = rxr->next_to_check;
		nm_i = netmap_idx_n2k(kring, nic_i);

		for (n = 0; ; n++) {	// XXX no need to count
			union e1000_rx_desc_extended *curr = &rxr->rx_base[nic_i];
			uint32_t staterr = le32toh(curr->wb.upper.status_error);

			if ((staterr & E1000_RXD_STAT_DD) == 0)
				break;
			ring->slot[nm_i].len = le16toh(curr->wb.upper.length);
			ring->slot[nm_i].flags = 0;
			bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[nic_i].map,
				BUS_DMASYNC_POSTREAD);
			nm_i = nm_next(nm_i, lim);
			/* make sure next_to_refresh follows next_to_check */
			rxr->next_to_refresh = nic_i;	// XXX
			nic_i = nm_next(nic_i, lim);
		}
		if (n) { /* update the state variables */
			rxr->next_to_check = nic_i;
			kring->nr_hwtail = nm_i;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/*
	 * Second part: skip past packets that userspace has released.
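	 *
	 * Slots between nr_hwcur and rhead were consumed by the
	 * application, typically with a loop over the rings exported
	 * by netmap_user.h; a minimal userspace sketch (consume() is
	 * a hypothetical placeholder, not part of the netmap API):
	 *
	 *	while (!nm_ring_empty(ring)) {
	 *		uint32_t i = ring->cur;
	 *		consume(NETMAP_BUF(ring, ring->slot[i].buf_idx),
	 *		    ring->slot[i].len);
	 *		ring->head = ring->cur = nm_ring_next(ring, i);
	 *	}
	 *
	 * Each released slot is re-armed below: rewrite the buffer
	 * address, clear the descriptor status so the DD bit starts
	 * fresh, and finally move RDT to tell the NIC how far it may
	 * write.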
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			union e1000_rx_desc_extended *curr = &rxr->rx_base[nic_i];
			struct em_rxbuffer *rxbuf = &rxr->rx_buffers[nic_i];

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				goto ring_reset;

			curr->read.buffer_addr = htole64(paddr);
			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, rxr->rxtag, rxbuf->map, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			curr->wb.upper.status_error = 0;
			bus_dmamap_sync(rxr->rxtag, rxbuf->map,
				BUS_DMASYNC_PREREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * IMPORTANT: we must leave one free slot in the ring,
		 * so move nic_i back by one unit
		 */
		nic_i = nm_prev(nic_i, lim);
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), nic_i);
	}

	return 0;

ring_reset:
	return netmap_ring_reinit(kring);
}


/*
 * Describe this adapter to the netmap core: ring counts, ring sizes
 * and the callbacks defined above. netmap_attach() takes its own
 * copy of the temporary descriptor.
 */
static void
em_netmap_attach(struct adapter *adapter)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = adapter->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	na.num_tx_desc = adapter->num_tx_desc;
	na.num_rx_desc = adapter->num_rx_desc;
	na.nm_txsync = em_netmap_txsync;
	na.nm_rxsync = em_netmap_rxsync;
	na.nm_register = em_netmap_reg;
	na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
	netmap_attach(&na);
}

/* end of file */