/*
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


/*
 * $FreeBSD: stable/11/sys/dev/netmap/if_lem_netmap.h 343771 2019-02-05 10:33:22Z vmaffione $
 *
 * netmap support for: lem
 *
 * For details on netmap support please see ixgbe_netmap.h
 */


#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>

/*
 * Register/unregister. We are already under netmap lock.
 */
static int
lem_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct adapter *adapter = ifp->if_softc;

	EM_CORE_LOCK(adapter);

	lem_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

#ifndef EM_LEGACY_IRQ // XXX do we need this ?
	taskqueue_block(adapter->tq);
	taskqueue_drain(adapter->tq, &adapter->rxtx_task);
	taskqueue_drain(adapter->tq, &adapter->link_task);
#endif /* !EM_LEGACY_IRQ */

	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	lem_init_locked(adapter);	/* also enable intr */

#ifndef EM_LEGACY_IRQ
	taskqueue_unblock(adapter->tq); // XXX do we need this ?
#endif /* !EM_LEGACY_IRQ */

	EM_CORE_UNLOCK(adapter);

	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}


static void
lem_netmap_intr(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct adapter *adapter = ifp->if_softc;

	EM_CORE_LOCK(adapter);
	if (onoff) {
		lem_enable_intr(adapter);
	} else {
		lem_disable_intr(adapter);
	}
	EM_CORE_UNLOCK(adapter);
}


/*
 * Reconcile kernel and user view of the transmit ring.
 */
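/*
 * A summary of the txsync contract (the authoritative description of
 * the kring fields is in netmap_kern.h): slots in
 * [kring->nr_hwcur, kring->rhead) carry frames the application wants
 * transmitted. They are copied into the NIC ring (netmap_idx_k2n()
 * translates between the two index spaces, which may be offset after
 * a ring reset) and TDT is bumped to start the transmitter. Completed
 * transmissions are then reclaimed by advancing kring->nr_hwtail,
 * which returns those slots to userspace for reuse.
 */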
static int
lem_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	/* generate an interrupt approximately every half ring */
	u_int report_frequency = kring->nkr_num_slots >> 1;

	/* device-specific */
	struct adapter *adapter = ifp->if_softc;

	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * First part: process new packets to send.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		nic_i = netmap_idx_k2n(kring, nm_i);
		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			/* device-specific */
			struct e1000_tx_desc *curr = &adapter->tx_desc_base[nic_i];
			struct em_buffer *txbuf = &adapter->tx_buffer_area[nic_i];
			int flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				E1000_TXD_CMD_RS : 0;

			NM_CHECK_ADDR_LEN(na, addr, len);

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				curr->buffer_addr = htole64(paddr);
				netmap_reload_map(na, adapter->txtag, txbuf->map, addr);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

			/* Fill the slot in the NIC ring. */
			curr->upper.data = 0;
			curr->lower.data = htole32(adapter->txd_cmd | len |
				(E1000_TXD_CMD_EOP | flags));
			bus_dmamap_sync(adapter->txtag, txbuf->map,
				BUS_DMASYNC_PREWRITE);

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
			// XXX might try an early kick
		}
		kring->nr_hwcur = head;

		/* synchronize the NIC ring */
		bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (ticks != kring->last_reclaim || flags & NAF_FORCE_RECLAIM ||
	    nm_kr_txempty(kring)) {
		kring->last_reclaim = ticks;
		/* record completed transmissions using TDH */
		nic_i = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
		if (unlikely(nic_i >= kring->nkr_num_slots)) {
			nm_prerr("TDH wrap at idx %d", nic_i);
			nic_i -= kring->nkr_num_slots;
		}
		adapter->next_tx_to_clean = nic_i;
		kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
	}

	return 0;
}


/*
 * Reconcile kernel and user view of the receive ring.
 */
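/*
 * A summary of the rxsync contract: the first part below scans NIC
 * descriptors whose DD (descriptor done) bit is set and advances
 * kring->nr_hwtail so the new frames become visible to userspace;
 * the second part walks from kring->nr_hwcur to kring->rhead, handing
 * buffers the application has released back to the NIC and updating
 * RDT, always keeping one slot free so a full ring can be told apart
 * from an empty one.
 */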
static int
lem_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
	    kring->nr_kflags & NKR_PENDINTR;

	/* device-specific */
	struct adapter *adapter = ifp->if_softc;

	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 */
	if (netmap_no_pendintr || force_update) {
		nic_i = adapter->next_rx_desc_to_check;
		nm_i = netmap_idx_n2k(kring, nic_i);

		for (n = 0; ; n++) {
			struct e1000_rx_desc *curr = &adapter->rx_desc_base[nic_i];
			uint32_t staterr = le32toh(curr->status);
			int len;

			if ((staterr & E1000_RXD_STAT_DD) == 0)
				break;
			len = le16toh(curr->length) - 4; // CRC
			if (len < 0) {
				nm_prlim(2, "bogus pkt (%d) size %d nic idx %d", n, len, nic_i);
				len = 0;
			}
			ring->slot[nm_i].len = len;
			ring->slot[nm_i].flags = 0;
			bus_dmamap_sync(adapter->rxtag,
				adapter->rx_buffer_area[nic_i].map,
				BUS_DMASYNC_POSTREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		if (n) { /* update the state variables */
			nm_prdis("%d new packets at nic %d nm %d tail %d",
				n,
				adapter->next_rx_desc_to_check,
				netmap_idx_n2k(kring, adapter->next_rx_desc_to_check),
				kring->nr_hwtail);
			adapter->next_rx_desc_to_check = nic_i;
			// if_inc_counter(ifp, IFCOUNTER_IPACKETS, n);
			kring->nr_hwtail = nm_i;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

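	/*
	 * Each descriptor handed back to the NIC below has its status
	 * field cleared, so a stale E1000_RXD_STAT_DD bit left over
	 * from the slot's previous use cannot be mistaken for a newly
	 * completed reception on the next pass through the first part.
	 */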
	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			struct e1000_rx_desc *curr = &adapter->rx_desc_base[nic_i];
			struct em_buffer *rxbuf = &adapter->rx_buffer_area[nic_i];

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				goto ring_reset;

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				curr->buffer_addr = htole64(paddr);
				netmap_reload_map(na, adapter->rxtag, rxbuf->map, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			curr->status = 0;
			bus_dmamap_sync(adapter->rxtag, rxbuf->map,
			    BUS_DMASYNC_PREREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * IMPORTANT: we must leave one free slot in the ring,
		 * so move nic_i back by one unit
		 */
		nic_i = nm_prev(nic_i, lim);
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), nic_i);
	}

	return 0;

ring_reset:
	return netmap_ring_reinit(kring);
}


static void
lem_netmap_attach(struct adapter *adapter)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = adapter->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	na.num_tx_desc = adapter->num_tx_desc;
	na.num_rx_desc = adapter->num_rx_desc;
	na.nm_txsync = lem_netmap_txsync;
	na.nm_rxsync = lem_netmap_rxsync;
	na.nm_register = lem_netmap_reg;
	na.num_tx_rings = na.num_rx_rings = 1;
	na.nm_intr = lem_netmap_intr;
	netmap_attach(&na);
}
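/*
 * Hook-up sketch (an assumption about the surrounding driver, not part
 * of this file): if_lem.c is expected to call into this file under
 * the DEV_NETMAP option, roughly as follows:
 *
 *	#ifdef DEV_NETMAP
 *		lem_netmap_attach(adapter);	// at the end of lem_attach()
 *	#endif
 *	...
 *	#ifdef DEV_NETMAP
 *		netmap_detach(ifp);		// in lem_detach()
 *	#endif
 */

/* end of file */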