/*
 * Copyright (C) 2015, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: stable/11/sys/dev/netmap/if_ixl_netmap.h 359310 2020-03-25 23:06:04Z vmaffione $
 *
 * netmap support for: ixl
 *
 * derived from ixgbe
 * netmap support for a network driver.
 * This file contains code but only static or inline functions used
 * by a single driver. To avoid replication of code we just #include
 * it near the beginning of the standard driver.
 * For ixl the file is imported in two places, hence the conditional at the
 * beginning.
 */

#include <net/netmap.h>
#include <sys/selinfo.h>

/*
 * Some drivers may need the following headers. Others
 * already include them by default

#include <vm/vm.h>
#include <vm/pmap.h>

 */
#include <dev/netmap/netmap_kern.h>

int ixl_netmap_txsync(struct netmap_kring *kring, int flags);
int ixl_netmap_rxsync(struct netmap_kring *kring, int flags);

extern int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip;

#ifdef NETMAP_IXL_MAIN
/*
 * device-specific sysctl variables:
 *
 * ixl_crcstrip: 0: NIC keeps CRC in rx frames, 1: NIC strips it (default).
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames not multiple of 64 is slower,
 *	so using crcstrip=0 helps in benchmarks.
 *
 * ixl_rx_miss, ixl_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip = 1;
SYSCTL_DECL(_dev_netmap);
/*
 * The ixl driver by default strips CRCs and we do not override it.
 */
#if 0
SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_crcstrip,
    CTLFLAG_RW, &ixl_crcstrip, 1, "NIC strips CRC on rx frames");
#endif
SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_rx_miss,
    CTLFLAG_RW, &ixl_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_rx_miss_bufs,
    CTLFLAG_RW, &ixl_rx_miss_bufs, 0, "potentially missed rx intr bufs");

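/*
 * Usage note (illustrative, not part of the original driver): once the
 * module is loaded, the diagnostic counters above are exposed under the
 * dev.netmap sysctl tree and can be read from userspace, e.g.
 *
 *	sysctl dev.netmap.ixl_rx_miss dev.netmap.ixl_rx_miss_bufs
 */
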
/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
ixl_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	IXL_PF_LOCK(pf);
	ixl_disable_intr(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	//set_crcstrip(&adapter->hw, onoff);
	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	ixl_init_locked(pf);	/* also enables intr */
	//set_crcstrip(&adapter->hw, onoff); // XXX why twice ?
	IXL_PF_UNLOCK(pf);
	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}


/*
 * The attach routine, called near the end of ixl_attach(),
 * fills the parameters for netmap_attach() and calls it.
 * It cannot fail: in the worst case (such as no memory)
 * netmap mode will be disabled and the driver will only
 * operate in standard mode.
 */
static void
ixl_netmap_attach(struct ixl_vsi *vsi)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = vsi->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	na.num_tx_desc = vsi->num_tx_desc;
	na.num_rx_desc = vsi->num_rx_desc;
	na.nm_txsync = ixl_netmap_txsync;
	na.nm_rxsync = ixl_netmap_rxsync;
	na.nm_register = ixl_netmap_reg;
	na.num_tx_rings = na.num_rx_rings = vsi->num_queues;
	netmap_attach(&na);
}


#else /* !NETMAP_IXL_MAIN, code for ixl_txrx.c */

/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmissions.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
int
ixl_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;

	/* device-specific */
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ixl_queue *que = &vsi->queues[kring->ring_id];
	struct tx_ring *txr = &que->txr;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_POSTREAD);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap ring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even when NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so do it only
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txr->buffers[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			/* device-specific */
			struct i40e_tx_desc *curr = &txr->base[nic_i];
			struct ixl_tx_buf *txbuf = &txr->buffers[nic_i];
			u64 flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				((u64)I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT) : 0;

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txr->buffers[nic_i + 1]);

			NM_CHECK_ADDR_LEN(na, addr, len);

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, txr->dma.tag, txbuf->map, addr);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

			/* Fill the slot in the NIC ring. */
			curr->buffer_addr = htole64(paddr);
			curr->cmd_type_offset_bsz = htole64(
			    ((u64)len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			    flags |
			    ((u64)I40E_TX_DESC_CMD_EOP << I40E_TXD_QW1_CMD_SHIFT) |
			    ((u64)I40E_TX_DESC_CMD_ICRC << I40E_TXD_QW1_CMD_SHIFT)
			    ); // XXX more ?

			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txr->dma.tag, txbuf->map,
			    BUS_DMASYNC_PREWRITE);

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txr->dma.tag, txr->dma.map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		wr32(vsi->hw, txr->tail, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	nic_i = LE32_TO_CPU(*(volatile __le32 *)&txr->base[que->num_tx_desc]);
	if (unlikely(nic_i >= que->num_tx_desc)) {
		nm_prerr("error: invalid value of hw head index %u", nic_i);
	} else if (nic_i != txr->next_to_clean) {
		/* some tx completed, increment avail */
		txr->next_to_clean = nic_i;
		kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
	}

	return 0;
}

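/*
 * Illustrative sketch (not part of the driver): how a userspace netmap
 * client typically ends up invoking the txsync routine above. Filling
 * slots and issuing NIOCTXSYNC (or poll()) on the netmap fd advances
 * kring->rhead and triggers the nm_txsync callback, i.e.
 * ixl_netmap_txsync() for this driver. The names below come from the
 * standard net/netmap_user.h API; the packet buffer (pkt, pkt_len) is
 * hypothetical.
 *
 *	#define NETMAP_WITH_LIBS
 *	#include <net/netmap_user.h>
 *
 *	struct nm_desc *d = nm_open("netmap:ixl0", NULL, 0, NULL);
 *	struct netmap_ring *txring = NETMAP_TXRING(d->nifp, 0);
 *
 *	while (nm_ring_space(txring) > 0) {
 *		struct netmap_slot *slot = &txring->slot[txring->cur];
 *		char *buf = NETMAP_BUF(txring, slot->buf_idx);
 *
 *		memcpy(buf, pkt, pkt_len);	// hypothetical payload
 *		slot->len = pkt_len;
 *		txring->head = txring->cur = nm_ring_next(txring, txring->cur);
 *	}
 *	ioctl(d->fd, NIOCTXSYNC, NULL);	// reaches the driver's nm_txsync
 */
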
/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
int
ixl_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* device-specific */
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ixl_queue *que = &vsi->queues[kring->ring_id];
	struct rx_ring *rxr = &que->rxr;

	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = rxr->next_check;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		int crclen = ixl_crcstrip ? 0 : 4;

		nic_i = rxr->next_check; // or also k2n(kring->nr_hwtail)
		nm_i = netmap_idx_n2k(kring, nic_i);

		for (n = 0; ; n++) {
			union i40e_32byte_rx_desc *curr = &rxr->base[nic_i];
			uint64_t qword = le64toh(curr->wb.qword1.status_error_len);
			uint32_t staterr = (qword & I40E_RXD_QW1_STATUS_MASK)
			    >> I40E_RXD_QW1_STATUS_SHIFT;

			if ((staterr & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0)
				break;
			ring->slot[nm_i].len = ((qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
			    >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - crclen;
			ring->slot[nm_i].flags = 0;
			bus_dmamap_sync(rxr->ptag,
			    rxr->buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		if (n) { /* update the state variables */
			if (netmap_no_pendintr && !force_update) {
				/* diagnostics */
				ixl_rx_miss++;
				ixl_rx_miss_bufs += n;
			}
			rxr->next_check = nic_i;
			kring->nr_hwtail = nm_i;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 * (kring->nr_hwcur to head excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			union i40e_32byte_rx_desc *curr = &rxr->base[nic_i];
			struct ixl_rx_buf *rxbuf = &rxr->buffers[nic_i];

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				goto ring_reset;

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, rxr->ptag, rxbuf->pmap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			curr->read.pkt_addr = htole64(paddr);
			curr->read.hdr_addr = 0; // XXX needed
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_PREREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * IMPORTANT: we must leave one free slot in the ring,
		 * so move nic_i back by one unit
		 */
		nic_i = nm_prev(nic_i, lim);
		wr32(vsi->hw, rxr->tail, nic_i);
	}

	return 0;

ring_reset:
	return netmap_ring_reinit(kring);
}

#endif /* !NETMAP_IXL_MAIN */

/* end of file */