/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/t4_netmap.c 355250 2019-11-30 20:42:18Z np $");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef DEV_NETMAP
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"

extern int fl_pad;	/* XXXNM */
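
/*
 * The knobs below are exported as hw.cxgbe.* sysctls (e.g.
 * hw.cxgbe.nm_black_hole).  The CTLFLAG_RDTUN ones are read-only at runtime
 * and must be set as loader tunables; the CTLFLAG_RWTUN ones can also be
 * changed on the fly.
 */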

/*
 * 0 = normal netmap rx
 * 1 = black hole
 * 2 = supermassive black hole (buffer packing enabled)
 */
int black_hole = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0,
    "Sink incoming packets.");

int rx_ndesc = 256;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");

int holdoff_tmr_idx = 2;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int nm_cong_drop = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_cong_drop, CTLFLAG_RDTUN,
    &nm_cong_drop, 0,
    "Congestion control for netmap rx queues (0 = backpressure, 1 = drop)");

static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
{
	int rc, cntxt_id, i;
	__be32 v;
	struct adapter *sc = vi->pi->adapter;
	struct sge_params *sp = &sc->params.sge;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	MPASS(!forwarding_intr_to_fwq(sc));
	KASSERT(nm_rxq->intr_idx < sc->intr_count,
	    ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx));
	v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(vi->viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(vi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
	if (cong >= 0) {
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
		    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
		    F_FW_IQ_CMD_FL0CONGEN);
	}
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
	    (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
	    (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
		X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) |
	    V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
		X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
	c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);
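	/*
	 * Note: fl0size programmed above appears to be in units of 8 freelist
	 * entries (a 64B hardware descriptor holds eight 8B buffer
	 * addresses), plus room for the status page at the end of the ring;
	 * that is why num_rx_desc is divided by 8 and spg_len by EQ_ESIZE.
	 */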

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) |
	    sc->chip_params->sge_fl_db;

	if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
		if (cong == 0)
			val = 1 << 19;	/* congestion applies to the queue */
		else {
			val = 2 << 19;	/* channel mode, map in low bits */
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
		}
	}

	t4_write_reg(sc, sc->sge_gts_reg,
	    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));

	return (rc);
}

static int
free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
	return (rc);
}
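
/*
 * Create the hardware eth tx queue that backs a netmap tx ring.  Its egress
 * updates are directed at the netmap rx queue at iqidx, which is how tx
 * credits find their way back to service_nm_rxq().
 */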
static int
alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = vi->pi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_eq_eth_cmd c;

	MPASS(na != NULL);
	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
	    V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize =
	    htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
		X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	return (rc);
}

static int
free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
		    nm_txq->cntxt_id, rc);
	return (rc);
}
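
/*
 * Switch the interface into netmap mode: create the hardware queues, stock
 * each freelist with netmap buffers (all but the last 8 entries, so the
 * freelist is never completely full), and point the VI's RSS table at the
 * netmap rx queues.
 */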
static int
cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_slot *slot;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
	int rc, i, j, hwidx;
	struct hw_buf_info *hwb;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((vi->flags & VI_INIT_DONE) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (EAGAIN);

	hwb = &sc->sge.hw_buf_info[0];
	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
		if (hwb->size == NETMAP_BUF_SIZE(na))
			break;
	}
	if (i >= SGE_FLBUF_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE(na));
		return (ENXIO);
	}
	hwidx = i;

	/* Must set caps before calling netmap_reset */
	nm_set_native_flags(na);

	for_each_nm_rxq(vi, i, nm_rxq) {
		alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		for (j = 0; j < nm_rxq->fl_sidx; j++) {
			uint64_t ba;

			PNMB(na, &slot[j], &ba);
			MPASS(ba != 0);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(j));

		(void) atomic_cmpset_int(&nm_rxq->nm_state, NM_OFF, NM_ON);
	}

	for_each_nm_txq(vi, i, nm_txq) {
		alloc_nm_txq_hwq(vi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	if (vi->nm_rss == NULL) {
		vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
		    M_ZERO | M_WAITOK);
	}
	for (i = 0; i < vi->rss_size;) {
		for_each_nm_rxq(vi, j, nm_rxq) {
			vi->nm_rss[i++] = nm_rxq->iq_abs_id;
			if (i == vi->rss_size)
				break;
		}
	}
	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    vi->nm_rss, vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap rss_config failed: %d\n", rc);

	return (rc);
}

static int
cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	int rc, i;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((vi->flags & VI_INIT_DONE) == 0)
		return (0);

	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    vi->rss, vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "failed to restore RSS config: %d\n", rc);
	nm_clear_native_flags(na);

	for_each_nm_txq(vi, i, nm_txq) {
		struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];

		/* Wait for hw pidx to catch up ... */
		while (be16toh(nm_txq->pidx) != spg->pidx)
			pause("nmpidx", 1);

		/* ... and then for the cidx. */
		while (spg->pidx != spg->cidx)
			pause("nmcidx", 1);

		free_nm_txq_hwq(vi, nm_txq);
	}
	for_each_nm_rxq(vi, i, nm_rxq) {
		while (!atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_OFF))
			pause("nmst", 1);

		free_nm_rxq_hwq(vi, nm_rxq);
	}

	return (rc);
}

static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);
	if (on)
		rc = cxgbe_netmap_on(sc, vi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, vi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}

/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/* Space (in descriptors) needed for a type1 WR that carries n packets */
static inline int
npkt_to_ndesc(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/* Space (in 16B units) needed for a type1 WR that carries n packets */
static inline int
npkt_to_len16(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}
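
/*
 * Layout behind the three helpers above: a type1 FW_ETH_TX_PKTS_WR starts
 * with a 16B WR header, and each frame adds a 16B cpl_tx_pkt_core plus a 16B
 * single-segment ulptx_sgl.  n frames therefore take 2n + 1 16B units
 * (npkt_to_len16), which fit in (n + 2) / 2 of the 64B hardware descriptors
 * (npkt_to_ndesc).  Inverting that, n descriptors carry up to 2n - 1 frames
 * (ndesc_to_npkt): e.g. 2 descriptors provide eight 16B units, enough for
 * the header plus three frames with one unit to spare.
 */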

#define NMIDXDIFF(q, idx)	IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);

	n = NMIDXDIFF(nm_txq, dbidx);
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}
	nm_txq->dbidx = nm_txq->pidx;
}
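
/*
 * When lazy_tx_credit_flush is 0, cxgbe_nm_tx() requests an egress update
 * with the final WR of every txsync batch so completions are reported right
 * away.  The default is to flush lazily and let the periodic equiq/equeq
 * requests (every sidx/2 or 64 descriptors, see below) do the job.
 */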
int lazy_tx_credit_flush = 1;

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(kring->na, slot, &ba);
			MPASS(ba != 0);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			/*
			 * netmap(4) says "netmap does not use features such as
			 * checksum offloading, TCP segmentation offloading,
			 * encryption, VLAN encapsulation/decapsulation, etc."
			 *
			 * So the ncxl interfaces have tx hardware checksumming
			 * disabled by default.  But you can override netmap by
			 * enabling IFCAP_TXCSUM on the interface manually.
			 */
			cpl->ctrl1 = txcsum ? 0 :
			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around.  Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt == 0 && npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}

		if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
			    F_FW_WR_EQUIQ);
			nm_txq->equeqidx = nm_txq->pidx;
			nm_txq->equiqidx = nm_txq->pidx;
		} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			nm_txq->equeqidx = nm_txq->pidx;
		}
		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
			ring_nm_txq_db(sc, nm_txq);
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}

/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}

static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	int n = 0;

	hw_cidx = be16toh(hw_cidx);

	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);

		/*
		 * We never sent a WR that wrapped around so the credits coming
		 * back, WR by WR, should never cause the cidx to wrap around
		 * either.
		 */
		MPASS(nm_txq->cidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
			nm_txq->cidx = 0;
	}

	return (n);
}
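
/*
 * netmap txsync handler.  First part: move the frames between nr_hwcur and
 * rhead into the hardware queue, packing as many as possible into each type1
 * WR within the contiguous descriptor space available.  Second part: harvest
 * tx credits and advance nr_hwtail for slots whose transmission completed.
 */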
static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq +
	    kring->ring_id];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining, txcsum;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to
	 * advance to kring->rhead.  Note that the driver's pidx moves
	 * independent of netmap's kring->nr_hwcur (pidx counts descriptors
	 * and the relation between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	return (0);
}
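
/*
 * netmap rxsync handler.  The freelist cidx (updated by service_nm_rxq())
 * drives nr_hwtail; buffers released by userspace are recycled back onto the
 * freelist in groups of 8, with a doorbell every 64 buffers while a large
 * backlog remains and a final one at the end.
 */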
static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq +
	    kring->ring_id];
	u_int const head = kring->rhead;
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) ||
	    kring->nr_kflags & NKR_PENDINTR;

	if (black_hole)
		return (0);	/* No updates ever. */

	if (netmap_no_pendintr || force_update) {
		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/* Userspace done with buffers from kring->nr_hwcur to head */
	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	n &= ~7U;
	if (n > 0) {
		u_int fl_pidx = nm_rxq->fl_pidx;
		struct netmap_slot *slot = &ring->slot[fl_pidx];
		uint64_t ba;
		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

		/*
		 * We always deal with 8 buffers at a time.  We must have
		 * stopped at a multiple of 8 (fl_pidx) last time around and we
		 * must have a multiple of 8 buffers to give to the freelist.
		 */
		MPASS((fl_pidx & 7) == 0);
		MPASS((n & 7) == 0);

		IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
		IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx);

		while (n > 0) {
			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
				PNMB(na, slot, &ba);
				MPASS(ba != 0);
				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
				slot->flags &= ~NS_BUF_CHANGED;
				MPASS(fl_pidx <= nm_rxq->fl_sidx);
			}
			n -= 8;
			if (fl_pidx == nm_rxq->fl_sidx) {
				fl_pidx = 0;
				slot = &ring->slot[0];
			}
			if (++dbinc == 8 && n >= 32) {
				wmb();
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(dbinc));
				dbinc = 0;
			}
		}
		MPASS(nm_rxq->fl_pidx == fl_pidx);

		if (dbinc > 0) {
			wmb();
			t4_write_reg(sc, sc->sge_kdoorbell_reg,
			    nm_rxq->fl_db_val | V_PIDX(dbinc));
		}
	}

	return (0);
}

void
cxgbe_nm_attach(struct vi_info *vi)
{
	struct port_info *pi;
	struct adapter *sc;
	struct netmap_adapter na;

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	pi = vi->pi;
	sc = pi->adapter;

	bzero(&na, sizeof(na));

	na.ifp = vi->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;

	/* Netmap doesn't know about the space reserved for the status page. */
	na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;

	/*
	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
	 * num_rx_desc is based on the number of buffers that can be held in
	 * the freelist, and not the number of entries in the iq.  (These two
	 * are not exactly the same due to the space taken up by the status
	 * page.)
	 */
	na.num_rx_desc = rounddown(vi->qsize_rxq, 8);
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = vi->nnmtxq;
	na.num_rx_rings = vi->nnmrxq;
	netmap_attach(&na);	/* This adds IFCAP_NETMAP to if_capabilities */
}

void
cxgbe_nm_detach(struct vi_info *vi)
{

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	netmap_detach(vi->ifp);
}

static inline const void *
unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl)
{

	MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL);

	/* data[0] is RSS header */
	return (&cpl->data[1]);
}

static void
handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_sge_egr_update *egr)
{
	uint32_t oq;
	struct sge_nm_txq *nm_txq;

	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	netmap_tx_irq(ifp, nm_txq->nid);
}
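
/*
 * Main netmap rx service loop, run when the queue's interrupt fires.  Walks
 * the ingress queue, turning CPL_RX_PKT entries into netmap slot updates and
 * egress updates into tx credit returns, and informs netmap (or, in
 * black-hole mode, recycles buffers straight back onto the freelist) every
 * rx_ndesc entries and once more on the way out.
 */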
void
service_nm_rxq(struct sge_nm_rxq *nm_rxq)
{
	struct vi_info *vi = nm_rxq->vi;
	struct adapter *sc = vi->pi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	const void *cpl;
	uint32_t lq;
	u_int n = 0, work = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
	u_int fl_credits = fl_cidx & 7;

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;
		cpl = &d->cpl[0];

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:
			if (black_hole != 2) {
				/* No buffer packing so new buf every time */
				MPASS(lq & F_RSPD_NEWBUF);
			}

			/* fall through */

		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				cpl = unwrap_nm_fw6_msg(cpl);
				/* fall through */
			case CPL_SGE_EGR_UPDATE:
				handle_nm_sge_egr_update(sc, ifp, cpl);
				break;
			case CPL_RX_PKT:
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
				    sc->params.sge.fl_pktshift;
				ring->slot[fl_cidx].flags = 0;
				fl_cidx += (lq & F_RSPD_NEWBUF) ? 1 : 0;
				fl_credits += (lq & F_RSPD_NEWBUF) ? 1 : 0;
				if (__predict_false(fl_cidx == nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		if (__predict_false(++n == rx_ndesc)) {
			atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
			if (black_hole && fl_credits >= 8) {
				fl_credits /= 8;
				IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
				    nm_rxq->fl_sidx);
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(fl_credits));
				fl_credits = fl_cidx & 7;
			} else if (!black_hole) {
				netmap_rx_irq(ifp, nm_rxq->nid, &work);
				MPASS(work != 0);
			}
			t4_write_reg(sc, sc->sge_gts_reg,
			    V_CIDXINC(n) | V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			n = 0;
		}
	}

	atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
	if (black_hole) {
		fl_credits /= 8;
		IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(fl_credits));
	} else
		netmap_rx_irq(ifp, nm_rxq->nid, &work);

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(n) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}
#endif